Merge "feat(mt8189): add support display driver" into integration
diff --git a/Makefile b/Makefile
index 2da1906..125d587 100644
--- a/Makefile
+++ b/Makefile
@@ -647,7 +647,6 @@
PSA_FWU_SUPPORT \
PSA_FWU_METADATA_FW_STORE_DESC \
ENABLE_MPMM \
- FEAT_PABANDON \
FEATURE_DETECTION \
TRNG_SUPPORT \
ENABLE_ERRATA_ALL \
@@ -856,7 +855,6 @@
ENABLE_TRF_FOR_NS \
ENABLE_FEAT_HCX \
ENABLE_MPMM \
- FEAT_PABANDON \
ENABLE_FEAT_FGT \
ENABLE_FEAT_FGT2 \
ENABLE_FEAT_FGWTE3 \
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index 4d641d3..b8c915a 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -97,6 +97,11 @@
void bl31_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
u_register_t arg3)
{
+#if FEATURE_DETECTION
+ /* Detect if features enabled during compilation are supported by PE. */
+ detect_arch_features(plat_my_core_pos());
+#endif /* FEATURE_DETECTION */
+
/* Enable early console if EARLY_CONSOLE flag is enabled */
plat_setup_early_console();
@@ -120,8 +125,10 @@
******************************************************************************/
void bl31_main(void)
{
+ unsigned int core_pos = plat_my_core_pos();
+
/* Init registers that never change for the lifetime of TF-A */
- cm_manage_extensions_el3(plat_my_core_pos());
+ cm_manage_extensions_el3(core_pos);
/* Init per-world context registers */
cm_manage_extensions_per_world();
@@ -129,11 +136,6 @@
NOTICE("BL31: %s\n", build_version_string);
NOTICE("BL31: %s\n", build_message);
-#if FEATURE_DETECTION
- /* Detect if features enabled during compilation are supported by PE. */
- detect_arch_features();
-#endif /* FEATURE_DETECTION */
-
#if ENABLE_RUNTIME_INSTRUMENTATION
PMF_CAPTURE_TIMESTAMP(bl_svc, BL31_ENTRY, PMF_CACHE_MAINT);
#endif
@@ -156,8 +158,6 @@
* Initialize the GIC driver as well as per-cpu and global interfaces.
* Platform has had an opportunity to initialise specifics.
*/
- unsigned int core_pos = plat_my_core_pos();
-
gic_init(core_pos);
gic_pcpu_init(core_pos);
gic_cpuif_enable(core_pos);
diff --git a/common/feat_detect.c b/common/feat_detect.c
index d7adae6..5f70397 100644
--- a/common/feat_detect.c
+++ b/common/feat_detect.c
@@ -7,8 +7,9 @@
#include <arch_features.h>
#include <common/debug.h>
#include <common/feat_detect.h>
+#include <plat/common/platform.h>
-static bool tainted;
+static bool detection_done[PLATFORM_CORE_COUNT] = { false };
/*******************************************************************************
* This section lists the wrapper modules for each feature to evaluate the
@@ -45,29 +46,21 @@
* We force inlining here to let the compiler optimise away the whole check
* if the feature is disabled at build time (FEAT_STATE_DISABLED).
******************************************************************************/
-static inline void __attribute((__always_inline__))
+static inline bool __attribute((__always_inline__))
check_feature(int state, unsigned long field, const char *feat_name,
unsigned int min, unsigned int max)
{
if (state == FEAT_STATE_ALWAYS && field < min) {
ERROR("FEAT_%s not supported by the PE\n", feat_name);
- tainted = true;
+ return true;
}
if (state >= FEAT_STATE_ALWAYS && field > max) {
ERROR("FEAT_%s is version %ld, but is only known up to version %d\n",
feat_name, field, max);
- tainted = true;
+ return true;
}
-}
-/************************************************
- * Feature : FEAT_PAUTH (Pointer Authentication)
- ***********************************************/
-static void read_feat_pauth(void)
-{
-#if (ENABLE_PAUTH == FEAT_STATE_ALWAYS) || (CTX_INCLUDE_PAUTH_REGS == FEAT_STATE_ALWAYS)
- feat_detect_panic(is_feat_pauth_present(), "PAUTH");
-#endif
+ return false;
}
static unsigned int read_feat_rng_trap_id_field(void)
@@ -326,127 +319,151 @@
* { FEAT_STATE_DISABLED, FEAT_STATE_ALWAYS, FEAT_STATE_CHECK }, taking values
* { 0, 1, 2 }, respectively, as their naming.
**********************************************************************************/
-void detect_arch_features(void)
+void detect_arch_features(unsigned int core_pos)
{
- tainted = false;
+ /* No need to keep checking after the first time for each core. */
+ if (detection_done[core_pos]) {
+ return;
+ }
+
+ bool tainted = false;
/* v8.0 features */
- check_feature(ENABLE_FEAT_SB, read_feat_sb_id_field(), "SB", 1, 1);
- check_feature(ENABLE_FEAT_CSV2_2, read_feat_csv2_id_field(),
- "CSV2_2", 2, 3);
+ tainted |= check_feature(ENABLE_FEAT_SB, read_feat_sb_id_field(),
+ "SB", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_CSV2_2, read_feat_csv2_id_field(),
+ "CSV2_2", 2, 3);
/*
* Even though the PMUv3 is an OPTIONAL feature, it is always
* implemented and Arm prescribes so. So assume it will be there and do
* away with a flag for it. This is used to check minor PMUv3px
* revisions so that we catch them as they come along
*/
- check_feature(FEAT_STATE_ALWAYS, read_feat_pmuv3_id_field(),
- "PMUv3", 1, ID_AA64DFR0_PMUVER_PMUV3P9);
+ tainted |= check_feature(FEAT_STATE_ALWAYS, read_feat_pmuv3_id_field(),
+ "PMUv3", 1, ID_AA64DFR0_PMUVER_PMUV3P9);
/* v8.1 features */
- check_feature(ENABLE_FEAT_PAN, read_feat_pan_id_field(), "PAN", 1, 3);
- check_feature(ENABLE_FEAT_VHE, read_feat_vhe_id_field(), "VHE", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_PAN, read_feat_pan_id_field(),
+ "PAN", 1, 3);
+ tainted |= check_feature(ENABLE_FEAT_VHE, read_feat_vhe_id_field(),
+ "VHE", 1, 1);
/* v8.2 features */
- check_feature(ENABLE_SVE_FOR_NS, read_feat_sve_id_field(),
- "SVE", 1, 1);
- check_feature(ENABLE_FEAT_RAS, read_feat_ras_id_field(), "RAS", 1, 2);
+ tainted |= check_feature(ENABLE_SVE_FOR_NS, read_feat_sve_id_field(),
+ "SVE", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_RAS, read_feat_ras_id_field(),
+ "RAS", 1, 2);
/* v8.3 features */
- /* TODO: Pauth yet to convert to tri-state feat detect logic */
- read_feat_pauth();
+ /* the PAuth fields are very complicated, no min/max is checked */
+ tainted |= check_feature(ENABLE_PAUTH, is_feat_pauth_present(),
+ "PAUTH", 1, 1);
/* v8.4 features */
- check_feature(ENABLE_FEAT_DIT, read_feat_dit_id_field(), "DIT", 1, 1);
- check_feature(ENABLE_FEAT_AMU, read_feat_amu_id_field(),
- "AMUv1", 1, 2);
- check_feature(ENABLE_FEAT_MOPS, read_feat_mops_id_field(),
- "MOPS", 1, 1);
- check_feature(ENABLE_FEAT_MPAM, read_feat_mpam_version(),
- "MPAM", 1, 17);
- check_feature(CTX_INCLUDE_NEVE_REGS, read_feat_nv_id_field(),
- "NV2", 2, 2);
- check_feature(ENABLE_FEAT_SEL2, read_feat_sel2_id_field(),
- "SEL2", 1, 1);
- check_feature(ENABLE_TRF_FOR_NS, read_feat_trf_id_field(),
- "TRF", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_DIT, read_feat_dit_id_field(),
+ "DIT", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_AMU, read_feat_amu_id_field(),
+ "AMUv1", 1, 2);
+ tainted |= check_feature(ENABLE_FEAT_MOPS, read_feat_mops_id_field(),
+ "MOPS", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_MPAM, read_feat_mpam_version(),
+ "MPAM", 1, 17);
+ tainted |= check_feature(CTX_INCLUDE_NEVE_REGS, read_feat_nv_id_field(),
+ "NV2", 2, 2);
+ tainted |= check_feature(ENABLE_FEAT_SEL2, read_feat_sel2_id_field(),
+ "SEL2", 1, 1);
+ tainted |= check_feature(ENABLE_TRF_FOR_NS, read_feat_trf_id_field(),
+ "TRF", 1, 1);
/* v8.5 features */
- check_feature(ENABLE_FEAT_MTE2, get_armv8_5_mte_support(), "MTE2",
- MTE_IMPLEMENTED_ELX, MTE_IMPLEMENTED_ASY);
- check_feature(ENABLE_FEAT_RNG, read_feat_rng_id_field(), "RNG", 1, 1);
- check_feature(ENABLE_BTI, read_feat_bti_id_field(), "BTI", 1, 1);
- check_feature(ENABLE_FEAT_RNG_TRAP, read_feat_rng_trap_id_field(),
- "RNG_TRAP", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_MTE2, get_armv8_5_mte_support(),
+ "MTE2", MTE_IMPLEMENTED_ELX, MTE_IMPLEMENTED_ASY);
+ tainted |= check_feature(ENABLE_FEAT_RNG, read_feat_rng_id_field(),
+ "RNG", 1, 1);
+ tainted |= check_feature(ENABLE_BTI, read_feat_bti_id_field(),
+ "BTI", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_RNG_TRAP, read_feat_rng_trap_id_field(),
+ "RNG_TRAP", 1, 1);
/* v8.6 features */
- check_feature(ENABLE_FEAT_AMUv1p1, read_feat_amu_id_field(),
- "AMUv1p1", 2, 2);
- check_feature(ENABLE_FEAT_FGT, read_feat_fgt_id_field(), "FGT", 1, 2);
- check_feature(ENABLE_FEAT_FGT2, read_feat_fgt_id_field(), "FGT2", 2, 2);
- check_feature(ENABLE_FEAT_ECV, read_feat_ecv_id_field(), "ECV", 1, 2);
- check_feature(ENABLE_FEAT_TWED, read_feat_twed_id_field(),
- "TWED", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_AMUv1p1, read_feat_amu_id_field(),
+ "AMUv1p1", 2, 2);
+ tainted |= check_feature(ENABLE_FEAT_FGT, read_feat_fgt_id_field(),
+ "FGT", 1, 2);
+ tainted |= check_feature(ENABLE_FEAT_FGT2, read_feat_fgt_id_field(),
+ "FGT2", 2, 2);
+ tainted |= check_feature(ENABLE_FEAT_ECV, read_feat_ecv_id_field(),
+ "ECV", 1, 2);
+ tainted |= check_feature(ENABLE_FEAT_TWED, read_feat_twed_id_field(),
+ "TWED", 1, 1);
/*
* even though this is a "DISABLE" it does confusingly perform feature
* enablement duties like all other flags here. Check it against the HW
* feature when we intend to diverge from the default behaviour
*/
- check_feature(DISABLE_MTPMU, read_feat_mtpmu_id_field(), "MTPMU", 1, 1);
+ tainted |= check_feature(DISABLE_MTPMU, read_feat_mtpmu_id_field(),
+ "MTPMU", 1, 1);
/* v8.7 features */
- check_feature(ENABLE_FEAT_HCX, read_feat_hcx_id_field(), "HCX", 1, 1);
- check_feature(ENABLE_FEAT_LS64_ACCDATA, read_feat_ls64_id_field(), "LS64", 1, 3);
+ tainted |= check_feature(ENABLE_FEAT_HCX, read_feat_hcx_id_field(),
+ "HCX", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_LS64_ACCDATA, read_feat_ls64_id_field(),
+ "LS64", 1, 3);
/* v8.9 features */
- check_feature(ENABLE_FEAT_TCR2, read_feat_tcr2_id_field(),
- "TCR2", 1, 1);
- check_feature(ENABLE_FEAT_S2PIE, read_feat_s2pie_id_field(),
- "S2PIE", 1, 1);
- check_feature(ENABLE_FEAT_S1PIE, read_feat_s1pie_id_field(),
- "S1PIE", 1, 1);
- check_feature(ENABLE_FEAT_S2POE, read_feat_s2poe_id_field(),
- "S2POE", 1, 1);
- check_feature(ENABLE_FEAT_S1POE, read_feat_s1poe_id_field(),
- "S1POE", 1, 1);
- check_feature(ENABLE_FEAT_CSV2_3, read_feat_csv2_id_field(),
- "CSV2_3", 3, 3);
- check_feature(ENABLE_FEAT_DEBUGV8P9, read_feat_debugv8p9_id_field(),
- "DEBUGV8P9", 11, 11);
- check_feature(ENABLE_FEAT_THE, read_feat_the_id_field(),
- "THE", 1, 1);
- check_feature(ENABLE_FEAT_SCTLR2, read_feat_sctlr2_id_field(),
- "SCTLR2", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_TCR2, read_feat_tcr2_id_field(),
+ "TCR2", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_S2PIE, read_feat_s2pie_id_field(),
+ "S2PIE", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_S1PIE, read_feat_s1pie_id_field(),
+ "S1PIE", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_S2POE, read_feat_s2poe_id_field(),
+ "S2POE", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_S1POE, read_feat_s1poe_id_field(),
+ "S1POE", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_CSV2_3, read_feat_csv2_id_field(),
+ "CSV2_3", 3, 3);
+ tainted |= check_feature(ENABLE_FEAT_DEBUGV8P9, read_feat_debugv8p9_id_field(),
+ "DEBUGV8P9", 11, 11);
+ tainted |= check_feature(ENABLE_FEAT_THE, read_feat_the_id_field(),
+ "THE", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_SCTLR2, read_feat_sctlr2_id_field(),
+ "SCTLR2", 1, 1);
/* v9.0 features */
- check_feature(ENABLE_BRBE_FOR_NS, read_feat_brbe_id_field(),
- "BRBE", 1, 2);
- check_feature(ENABLE_TRBE_FOR_NS, read_feat_trbe_id_field(),
- "TRBE", 1, 1);
+ tainted |= check_feature(ENABLE_BRBE_FOR_NS, read_feat_brbe_id_field(),
+ "BRBE", 1, 2);
+ tainted |= check_feature(ENABLE_TRBE_FOR_NS, read_feat_trbe_id_field(),
+ "TRBE", 1, 1);
/* v9.2 features */
- check_feature(ENABLE_SME_FOR_NS, read_feat_sme_id_field(),
- "SME", 1, 2);
- check_feature(ENABLE_SME2_FOR_NS, read_feat_sme_id_field(),
- "SME2", 2, 2);
- check_feature(ENABLE_FEAT_FPMR, read_feat_fpmr_id_field(),
- "FPMR", 1, 1);
+ tainted |= check_feature(ENABLE_SME_FOR_NS, read_feat_sme_id_field(),
+ "SME", 1, 2);
+ tainted |= check_feature(ENABLE_SME2_FOR_NS, read_feat_sme_id_field(),
+ "SME2", 2, 2);
+ tainted |= check_feature(ENABLE_FEAT_FPMR, read_feat_fpmr_id_field(),
+ "FPMR", 1, 1);
/* v9.3 features */
- check_feature(ENABLE_FEAT_D128, read_feat_d128_id_field(),
- "D128", 1, 1);
- check_feature(ENABLE_FEAT_GCIE, read_feat_gcie_id_field(),
- "GCIE", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_D128, read_feat_d128_id_field(),
+ "D128", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_GCIE, read_feat_gcie_id_field(),
+ "GCIE", 1, 1);
/* v9.4 features */
- check_feature(ENABLE_FEAT_GCS, read_feat_gcs_id_field(), "GCS", 1, 1);
- check_feature(ENABLE_RME, read_feat_rme_id_field(), "RME", 1, 1);
- check_feature(ENABLE_FEAT_PAUTH_LR, is_feat_pauth_lr_present(), "PAUTH_LR", 1, 1);
- check_feature(ENABLE_FEAT_FGWTE3, read_feat_fgwte3_id_field(),
- "FGWTE3", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_GCS, read_feat_gcs_id_field(),
+ "GCS", 1, 1);
+ tainted |= check_feature(ENABLE_RME, read_feat_rme_id_field(),
+ "RME", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_PAUTH_LR, is_feat_pauth_lr_present(),
+ "PAUTH_LR", 1, 1);
+ tainted |= check_feature(ENABLE_FEAT_FGWTE3, read_feat_fgwte3_id_field(),
+ "FGWTE3", 1, 1);
if (tainted) {
panic();
}
+
+ detection_done[core_pos] = true;
}
diff --git a/docs/components/secure-partition-manager-mm.rst b/docs/components/secure-partition-manager-mm.rst
index d9b2b1b..589a362 100644
--- a/docs/components/secure-partition-manager-mm.rst
+++ b/docs/components/secure-partition-manager-mm.rst
@@ -652,6 +652,13 @@
There are no alignment restrictions on the Base Address. The permission
attributes of the translation granule it lies in are returned.
+ - **uint32** Input Page Count
+
+ This parameter is the number of translation granule size pages from
+ *Base Address* whose permission should be returned.
+ This is calculated as *Input Page count + 1*.
+ (i.e. If Input Page Count is 0, then it is calculated as 1).
+
- Return parameters
- **int32** - Memory Attributes/Return Code
@@ -687,6 +694,16 @@
See `Error Codes`_ for integer values that are associated with each return
code.
+ - **uint32** - Output Page Count
+
+ On success, the number of translation granule size pages from
+ the *Base address* whose permissions match those returned in the
+ *Memory Attributes* output parameter.
+ This is calculated as *Output Page count + 1*.
+   (i.e. if Output Page Count is 0, it is calculated as 1).
+
+   On failure, it must be zero.
+
- Usage
This function is used to request the permission attributes for S-EL0 on a
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index 8a607ac..e2fba99 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -558,12 +558,6 @@
power domain dynamic power budgeting and limit the triggering of whole-rail
(i.e. clock chopping) responses to overcurrent conditions. Defaults to ``0``.
- - ``FEAT_PABANDON``: Boolean option to enable support for powerdown abandon on
- Arm cores that support it (currently Gelas and Travis). Extends the PSCI
- implementation to expect waking up after the terminal ``wfi``. Currently,
- introduces a performance penalty. Once this is removed, this option will be
- removed and the feature will be enabled by default. Defaults to ``0``.
-
- ``ENABLE_PIE``: Boolean option to enable Position Independent Executable(PIE)
support within generic code in TF-A. This option is currently only supported
in BL2, BL31, and BL32 (TSP) for AARCH64 binaries, and
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
index 4157659..e37fe1f 100644
--- a/docs/porting-guide.rst
+++ b/docs/porting-guide.rst
@@ -3107,13 +3107,14 @@
the CPU power domain and its parent power domain levels.
It is preferred that this function returns. The caller will invoke
-``psci_power_down_wfi()`` to powerdown the CPU, mitigate any powerdown errata,
+``wfi()`` to powerdown the CPU, mitigate any powerdown errata,
and handle any wakeups that may arise. Previously, this function did not return
and instead called ``wfi`` (in an infinite loop) directly. This is still
possible on platforms where this is guaranteed to be terminal, however, it is
strongly discouraged going forward.
-Previously this function was called ``pwr_domain_pwr_down_wfi()``.
+Previously this function was called ``pwr_domain_pwr_down_wfi()`` and invoked
+``psci_power_down_wfi()`` (now removed).
plat_psci_ops.pwr_domain_on_finish()
....................................
@@ -3634,8 +3635,8 @@
External Abort handling and RAS Support
---------------------------------------
-If any cores on the platform support powerdown abandon (i.e. ``FEAT_PABANDON``
-is set, check the "Core powerup and powerdown sequence" in their TRMs), then
+If any cores on the platform support powerdown abandon (check the "Core powerup
+and powerdown sequence" in their TRMs), then
these functions should be able to handle being called with power domains off and
after the powerdown ``wfi``. In other words it may run after a call to
``pwr_domain_suspend()`` and before a call to ``pwr_domain_suspend_finish()``
diff --git a/docs/security_advisories/index.rst b/docs/security_advisories/index.rst
index 0ab73bc..a6fed96 100644
--- a/docs/security_advisories/index.rst
+++ b/docs/security_advisories/index.rst
@@ -17,3 +17,4 @@
security-advisory-tfv-10.rst
security-advisory-tfv-11.rst
security-advisory-tfv-12.rst
+ security-advisory-tfv-13.rst
diff --git a/docs/security_advisories/security-advisory-tfv-13.rst b/docs/security_advisories/security-advisory-tfv-13.rst
new file mode 100644
index 0000000..929e9b8
--- /dev/null
+++ b/docs/security_advisories/security-advisory-tfv-13.rst
@@ -0,0 +1,82 @@
+Advisory TFV-13 (CVE-2024-7881)
+================================
+
++----------------+-----------------------------------------------------------------+
+| Title | An unprivileged context can trigger a data memory-dependent |
+| | prefetch engine to fetch the contents of a privileged location |
+| | and consume those contents as an address that is |
+| | also dereferenced. |
+| | |
++================+=================================================================+
+| CVE ID | `CVE-2024-7881`_ |
++----------------+-----------------------------------------------------------------+
+| Date | Reported on 16 August 2024 |
++----------------+-----------------------------------------------------------------+
+| Versions | TF-A version from v2.2 to v2.12 |
+| Affected | LTS releases lts-v2.8.0 to lts-v2.8.28 |
+| | LTS releases lts-v2.10.0 to lts-v2.10.12 |
++----------------+-----------------------------------------------------------------+
+| Configurations | All |
+| Affected | |
++----------------+-----------------------------------------------------------------+
+| Impact | Potential leakage of secure world data to normal world. |
++----------------+-----------------------------------------------------------------+
+| Fix Version | `Gerrit topic #ar/smccc_arch_wa_4`_ |
+| | Also see mitigation guidance in the `Official Arm Advisory`_ |
++----------------+-----------------------------------------------------------------+
+| Credit | Arm |
++----------------+-----------------------------------------------------------------+
+
+Description
+-----------
+
+An issue has been identified in some Arm-based CPUs that may allow
+an unprivileged context to trigger a data memory-dependent prefetch engine
+to fetch the contents of a privileged location (for which it
+does not have read permission) and consume those contents as an address
+that is also dereferenced.
+
+The below table lists all the CPUs impacted by this vulnerability and have
+mitigation in TF-A.
+
++----------------------+
+| Core |
++----------------------+
+| Cortex-X3 |
++----------------------+
+| Cortex-X4 |
++----------------------+
+| Cortex-X925 |
++----------------------+
+| Neoverse-V2 |
++----------------------+
+| Neoverse-V3 |
++----------------------+
+| Neoverse-V3AE |
++----------------------+
+
+Mitigation and Recommendations
+------------------------------
+
+Arm recommends following the mitigation steps and configuration changes
+described in the official advisory. The mitigation for CVE-2024-7881 is
+implemented at EL3 and addresses vulnerabilities caused by memory-dependent
+speculative prefetching. This issue is avoided by setting CPUACTLR6_EL1[41]
+to 1, which disables the affected prefetcher.
+
+Arm has updated the SMC Calling Convention spec so that privileged normal world
+software can identify when the issue has been mitigated in
+firmware (SMCCC_ARCH_WORKAROUND_4). Refer to the `SMC Calling Convention
+Specification`_ for more details.
+
+The above workaround is enabled by default (on vulnerable CPUs only).
+Platforms can choose to disable it at compile time if
+they do not require it.
+
+For further technical information, affected CPUs, and detailed guidance,
+refer to the full `Official Arm Advisory`_.
+
+.. _CVE-2024-7881: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-7881
+.. _Gerrit topic #ar/smccc_arch_wa_4: https://review.trustedfirmware.org/q/topic:%22ar/smccc_arch_wa_4%22
+.. _SMC Calling Convention specification: https://developer.arm.com/documentation/den0028/latest
+.. _Official Arm Advisory: https://developer.arm.com/documentation/110326/latest
diff --git a/fdts/fvp-base-psci-common.dtsi b/fdts/fvp-base-psci-common.dtsi
index 2a128d8..92d832f 100644
--- a/fdts/fvp-base-psci-common.dtsi
+++ b/fdts/fvp-base-psci-common.dtsi
@@ -259,10 +259,10 @@
ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>,
/* First 3GB of 256GB PCIe memory region 2 */
<0x2000000 0x40 0x00000000 0x40 0x00000000 0x0 0xc0000000>;
- interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
msi-map = <0x0 &its 0x0 0x10000>;
iommu-map = <0x0 &smmu 0x0 0x10000>;
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index ee18309..a1cf5be 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -706,9 +706,8 @@
#define MDCR_SBRBE(x) ((x) << MDCR_SBRBE_SHIFT)
#define MDCR_SBRBE_ALL ULL(0x3)
#define MDCR_SBRBE_NS ULL(0x1)
-#define MDCR_NSTB(x) ((x) << 24)
-#define MDCR_NSTB_EL1 ULL(0x3)
-#define MDCR_NSTB_EL3 ULL(0x2)
+#define MDCR_NSTB_EN_BIT (ULL(1) << 24)
+#define MDCR_NSTB_SS_BIT (ULL(1) << 25)
#define MDCR_NSTBE_BIT (ULL(1) << 26)
#define MDCR_MTPME_BIT (ULL(1) << 28)
#define MDCR_TDCC_BIT (ULL(1) << 27)
@@ -723,9 +722,8 @@
#define MDCR_SPD32_LEGACY ULL(0x0)
#define MDCR_SPD32_DISABLE ULL(0x2)
#define MDCR_SPD32_ENABLE ULL(0x3)
-#define MDCR_NSPB(x) ((x) << 12)
-#define MDCR_NSPB_EL1 ULL(0x3)
-#define MDCR_NSPB_EL3 ULL(0x2)
+#define MDCR_NSPB_SS_BIT (ULL(1) << 13)
+#define MDCR_NSPB_EN_BIT (ULL(1) << 12)
#define MDCR_NSPBE_BIT (ULL(1) << 11)
#define MDCR_TDOSA_BIT (ULL(1) << 10)
#define MDCR_TDA_BIT (ULL(1) << 9)
@@ -891,6 +889,24 @@
#define SPSR_NZCV (SPSR_V_BIT | SPSR_C_BIT | SPSR_Z_BIT | SPSR_N_BIT)
#define SPSR_PACM_BIT_AARCH64 BIT_64(35)
+/*
+ * SPSR_EL2
+ * M=0x9 (0b1001 EL2h)
+ * M[4]=0
+ * DAIF=0xF Exceptions masked on entry.
+ * BTYPE=0 BTI not yet supported.
+ * SSBS=0 Not yet supported.
+ * IL=0 Not an illegal exception return.
+ * SS=0 Not single stepping.
+ * PAN=1 RMM shouldn't access Unprivileged memory when running in VHE mode.
+ * UAO=0
+ * DIT=0
+ * TCO=0
+ * NZCV=0
+ */
+#define SPSR_EL2_REALM (SPSR_M_EL2H | (0xF << SPSR_DAIF_SHIFT) | \
+ SPSR_PAN_BIT)
+
#define DISABLE_ALL_EXCEPTIONS \
(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
#define DISABLE_INTERRUPTS (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
diff --git a/include/common/feat_detect.h b/include/common/feat_detect.h
index 18e6c42..04e4c02 100644
--- a/include/common/feat_detect.h
+++ b/include/common/feat_detect.h
@@ -8,7 +8,7 @@
#define FEAT_DETECT_H
/* Function Prototypes */
-void detect_arch_features(void);
+void detect_arch_features(unsigned int core_pos);
/* Macro Definitions */
#define FEAT_STATE_DISABLED 0
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index 84107a4..331bfdb 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -653,4 +653,15 @@
#endif
.endm
+/*
+ * Call this just before a return to indicate support for pabandon. Only
+ * necessary on an abandon call, but harmless on a powerdown call.
+ *
+ * PSCI wants us to tell it we handled a pabandon by returning 0. This is the
+ * only way support for it is indicated.
+ */
+.macro signal_pabandon_handled
+ mov_imm x0, PABANDON_ACK
+.endm
+
#endif /* CPU_MACROS_S */
diff --git a/include/lib/cpus/cpu_ops.h b/include/lib/cpus/cpu_ops.h
index 5ba78cf..765cd59 100644
--- a/include/lib/cpus/cpu_ops.h
+++ b/include/lib/cpus/cpu_ops.h
@@ -21,6 +21,12 @@
/* The number of CPU operations allowed */
#define CPU_MAX_PWR_DWN_OPS 2
+/*
+ * value needs to be distinct from CPUPWRCTLR_EL1 likely values: its top bits
+ * are RES0 and its bottom bits will be written to power down. Pick the opposite
+ * with something that looks like "abandon" in the middle.
+ */
+#define PABANDON_ACK 0xffaba4d4aba4d400
/*
* Define the sizes of the fields in the cpu_ops structure. Word size is set per
@@ -104,7 +110,7 @@
void (*e_handler_func)(long es);
#endif /* __aarch64__ */
#if (defined(IMAGE_BL31) || defined(IMAGE_BL32)) && CPU_MAX_PWR_DWN_OPS
- void (*pwr_dwn_ops[CPU_MAX_PWR_DWN_OPS])(void);
+ u_register_t (*pwr_dwn_ops[CPU_MAX_PWR_DWN_OPS])();
#endif /* (defined(IMAGE_BL31) || defined(IMAGE_BL32)) && CPU_MAX_PWR_DWN_OPS */
void *errata_list_start;
void *errata_list_end;
diff --git a/include/lib/extensions/spe.h b/include/lib/extensions/spe.h
index 0a41e1e..b58e18c 100644
--- a/include/lib/extensions/spe.h
+++ b/include/lib/extensions/spe.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,14 +11,18 @@
#include <context.h>
#if ENABLE_SPE_FOR_NS
-void spe_enable(cpu_context_t *ctx);
-void spe_disable(cpu_context_t *ctx);
+void spe_enable_ns(cpu_context_t *ctx);
+void spe_disable_secure(cpu_context_t *ctx);
+void spe_disable_realm(cpu_context_t *ctx);
void spe_init_el2_unused(void);
#else
-static inline void spe_enable(cpu_context_t *ctx)
+static inline void spe_enable_ns(cpu_context_t *ctx)
{
}
-static inline void spe_disable(cpu_context_t *ctx)
+static inline void spe_disable_secure(cpu_context_t *ctx)
+{
+}
+static inline void spe_disable_realm(cpu_context_t *ctx)
{
}
static inline void spe_init_el2_unused(void)
diff --git a/include/lib/extensions/trbe.h b/include/lib/extensions/trbe.h
index 2c488e0..bd36f54 100644
--- a/include/lib/extensions/trbe.h
+++ b/include/lib/extensions/trbe.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,14 +10,22 @@
#include <context.h>
#if ENABLE_TRBE_FOR_NS
-void trbe_disable(cpu_context_t *ctx);
-void trbe_enable(cpu_context_t *ctx);
+void trbe_enable_ns(cpu_context_t *ctx);
+void trbe_disable_ns(cpu_context_t *ctx);
+void trbe_disable_secure(cpu_context_t *ctx);
+void trbe_disable_realm(cpu_context_t *ctx);
void trbe_init_el2_unused(void);
#else
-static inline void trbe_disable(cpu_context_t *ctx)
+static inline void trbe_enable_ns(cpu_context_t *ctx)
{
}
-static inline void trbe_enable(cpu_context_t *ctx)
+static inline void trbe_disable_ns(cpu_context_t *ctx)
+{
+}
+static inline void trbe_disable_secure(cpu_context_t *ctx)
+{
+}
+static inline void trbe_disable_realm(cpu_context_t *ctx)
{
}
static inline void trbe_init_el2_unused(void)
diff --git a/include/lib/psci/psci.h b/include/lib/psci/psci.h
index 68e721a..b146d39 100644
--- a/include/lib/psci/psci.h
+++ b/include/lib/psci/psci.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -376,7 +376,6 @@
#if PSCI_OS_INIT_MODE
int psci_set_suspend_mode(unsigned int mode);
#endif
-void psci_power_down_wfi(void);
void psci_arch_setup(void);
#endif /*__ASSEMBLER__*/
diff --git a/include/lib/psci/psci_lib.h b/include/lib/psci/psci_lib.h
index 1e1d5fc..12efe17 100644
--- a/include/lib/psci/psci_lib.h
+++ b/include/lib/psci/psci_lib.h
@@ -25,7 +25,7 @@
int32_t (*svc_off)(u_register_t __unused unused);
void (*svc_suspend)(u_register_t max_off_pwrlvl);
void (*svc_on_finish)(u_register_t __unused unused);
- void (*svc_suspend_finish)(u_register_t max_off_pwrlvl);
+ void (*svc_suspend_finish)(u_register_t max_off_pwrlvl, bool abandon);
int32_t (*svc_migrate)(u_register_t from_cpu, u_register_t to_cpu);
int32_t (*svc_migrate_info)(u_register_t *resident_cpu);
void (*svc_system_off)(void);
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
index 64fe5ef..6a0d890 100644
--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -407,9 +407,11 @@
* memory page it lies within are returned.
* attr
* Output parameter where to store the attributes of the targeted memory page.
+ * table_level
+ * Output parameter where to store base_va's table level
*/
int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
- uint32_t *attr);
+ uint32_t *attr, unsigned int *table_level);
int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr);
#endif /*__ASSEMBLER__*/
diff --git a/include/services/arm_arch_svc.h b/include/services/arm_arch_svc.h
index ea7de0e..242bf66 100644
--- a/include/services/arm_arch_svc.h
+++ b/include/services/arm_arch_svc.h
@@ -207,6 +207,7 @@
SCR_FIQ_BIT | \
SCR_IRQ_BIT | \
SCR_NS_BIT | \
+ SCR_NSE_BIT | \
SCR_RES1_BITS | \
SCR_FEAT_MEC | \
SCR_PLAT_IGNORED)
@@ -271,7 +272,7 @@
#endif
#if ENABLE_TRBE_FOR_NS
-#define MDCR_FEAT_TRBE MDCR_NSTB(1UL)
+#define MDCR_FEAT_TRBE MDCR_NSTB_EN_BIT
#else
#define MDCR_FEAT_TRBE (0)
#endif
@@ -283,7 +284,7 @@
#endif
#if ENABLE_SPE_FOR_NS
-#define MDCR_FEAT_SPE MDCR_NSPB(1UL)
+#define MDCR_FEAT_SPE MDCR_NSPB_EN_BIT
#else
#define MDCR_FEAT_SPE (0)
#endif
@@ -313,12 +314,12 @@
MDCR_SBRBE(2UL) | \
MDCR_MTPME_BIT | \
MDCR_NSTBE_BIT | \
- MDCR_NSTB(2UL) | \
+ MDCR_NSTB_SS_BIT | \
MDCR_MCCD_BIT | \
MDCR_SCCD_BIT | \
MDCR_SDD_BIT | \
MDCR_SPD32(3UL) | \
- MDCR_NSPB(2UL) | \
+ MDCR_NSPB_SS_BIT | \
MDCR_NSPBE_BIT | \
MDCR_PLAT_IGNORED)
CASSERT((MDCR_EL3_FEATS & MDCR_EL3_IGNORED) == 0, mdcr_feat_is_ignored);
diff --git a/lib/aarch64/armclang_printf.S b/lib/aarch64/armclang_printf.S
index f9326fd..12622ae 100644
--- a/lib/aarch64/armclang_printf.S
+++ b/lib/aarch64/armclang_printf.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,6 +11,7 @@
.globl __0printf
.globl __1printf
.globl __2printf
+ .globl __2snprintf
func __0printf
b printf
@@ -23,3 +24,7 @@
func __2printf
b printf
endfunc __2printf
+
+func __2snprintf
+ b snprintf
+endfunc __2snprintf
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index 83e3e49..863448c 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -47,46 +47,7 @@
#endif
-#ifdef IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */
- /*
- * void prepare_cpu_pwr_dwn(unsigned int power_level)
- *
- * Prepare CPU power down function for all platforms. The function takes
- * a domain level to be powered down as its parameter. After the cpu_ops
- * pointer is retrieved from cpu_data, the handler for requested power
- * level is called.
- */
- .globl prepare_cpu_pwr_dwn
-func prepare_cpu_pwr_dwn
- /*
- * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
- * power down handler for the last power level
- */
- mov r2, #(CPU_MAX_PWR_DWN_OPS - 1)
- cmp r0, r2
- movhi r0, r2
-
- push {r0, lr}
- bl _cpu_data
- pop {r2, lr}
-
- ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR]
-#if ENABLE_ASSERTIONS
- cmp r0, #0
- ASM_ASSERT(ne)
-#endif
-
- /* Get the appropriate power down handler */
- mov r1, #CPU_PWR_DWN_OPS
- add r1, r1, r2, lsl #2
- ldr r1, [r0, r1]
-#if ENABLE_ASSERTIONS
- cmp r1, #0
- ASM_ASSERT(ne)
-#endif
- bx r1
-endfunc prepare_cpu_pwr_dwn
-
+#ifdef IMAGE_BL32
/*
* Initializes the cpu_ops_ptr if not already initialized
* in cpu_data. This must only be called after the data cache
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
index 9843943..243f657 100644
--- a/lib/cpus/aarch64/aem_generic.S
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -12,15 +12,6 @@
func aem_generic_core_pwr_dwn
/* ---------------------------------------------
- * Disable the Data Cache.
- * ---------------------------------------------
- */
- mrs x1, sctlr_el3
- bic x1, x1, #SCTLR_C_BIT
- msr sctlr_el3, x1
- isb
-
- /* ---------------------------------------------
* AEM model supports L3 caches in which case L2
* will be private per core caches and flush
* from L1 to L2 is not sufficient.
@@ -60,15 +51,6 @@
func aem_generic_cluster_pwr_dwn
/* ---------------------------------------------
- * Disable the Data Cache.
- * ---------------------------------------------
- */
- mrs x1, sctlr_el3
- bic x1, x1, #SCTLR_C_BIT
- msr sctlr_el3, x1
- isb
-
- /* ---------------------------------------------
* Flush all caches to PoC.
* ---------------------------------------------
*/
diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S
index 40e6200..bb354df 100644
--- a/lib/cpus/aarch64/cortex_a35.S
+++ b/lib/cpus/aarch64/cortex_a35.S
@@ -13,16 +13,6 @@
cpu_reset_prologue cortex_a35
/* ---------------------------------------------
- * Disable L1 data cache and unified L2 cache
- * ---------------------------------------------
- */
-func cortex_a35_disable_dcache
- sysreg_bit_clear sctlr_el3, SCTLR_C_BIT
- isb
- ret
-endfunc cortex_a35_disable_dcache
-
- /* ---------------------------------------------
* Disable intra-cluster coherency
* ---------------------------------------------
*/
@@ -55,12 +45,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl cortex_a35_disable_dcache
-
- /* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
@@ -79,12 +63,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl cortex_a35_disable_dcache
-
- /* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S
index 258817f..1285034 100644
--- a/lib/cpus/aarch64/cortex_a510.S
+++ b/lib/cpus/aarch64/cortex_a510.S
@@ -187,6 +187,7 @@
* ----------------------------------------------------
*/
func cortex_a510_core_pwr_dwn
+ apply_erratum cortex_a510, ERRATUM(2684597), ERRATA_A510_2684597
/* ---------------------------------------------------
* Enable CPU power down bit in power control register
* ---------------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index dbfff87..e3b69ab 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -15,16 +15,6 @@
cpu_reset_prologue cortex_a53
/* ---------------------------------------------
- * Disable L1 data cache and unified L2 cache
- * ---------------------------------------------
- */
-func cortex_a53_disable_dcache
- sysreg_bit_clear sctlr_el3, SCTLR_C_BIT
- isb
- ret
-endfunc cortex_a53_disable_dcache
-
- /* ---------------------------------------------
* Disable intra-cluster coherency
* ---------------------------------------------
*/
@@ -144,12 +134,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl cortex_a53_disable_dcache
-
- /* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
@@ -168,12 +152,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl cortex_a53_disable_dcache
-
- /* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index 553f6f9..18521a2 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -16,16 +16,6 @@
cpu_reset_prologue cortex_a57
/* ---------------------------------------------
- * Disable L1 data cache and unified L2 cache
- * ---------------------------------------------
- */
-func cortex_a57_disable_dcache
- sysreg_bit_clear sctlr_el3, SCTLR_C_BIT
- isb
- ret
-endfunc cortex_a57_disable_dcache
-
- /* ---------------------------------------------
* Disable all types of L2 prefetches.
* ---------------------------------------------
*/
@@ -59,7 +49,7 @@
msr osdlr_el1, x0
isb
- apply_erratum cortex_a57, ERRATUM(817169), ERRATA_A57_817169, NO_GET_CPU_REV
+ apply_erratum cortex_a57, ERRATUM(817169), ERRATA_A57_817169
dsb sy
ret
@@ -200,12 +190,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl cortex_a57_disable_dcache
-
- /* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
@@ -240,12 +224,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl cortex_a57_disable_dcache
-
- /* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index 59b6244..65fa98f 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -276,7 +276,7 @@
* ----------------------------------------------------
*/
func cortex_a710_core_pwr_dwn
- apply_erratum cortex_a710, ERRATUM(2008768), ERRATA_A710_2008768, NO_GET_CPU_REV
+ apply_erratum cortex_a710, ERRATUM(2008768), ERRATA_A710_2008768
apply_erratum cortex_a710, ERRATUM(2291219), ERRATA_A710_2291219, NO_GET_CPU_REV
/* ---------------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 23b27ab..f35f867 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -18,18 +18,6 @@
cpu_reset_prologue cortex_a72
/* ---------------------------------------------
- * Disable L1 data cache and unified L2 cache
- * ---------------------------------------------
- */
-func cortex_a72_disable_dcache
- mrs x1, sctlr_el3
- bic x1, x1, #SCTLR_C_BIT
- msr sctlr_el3, x1
- isb
- ret
-endfunc cortex_a72_disable_dcache
-
- /* ---------------------------------------------
* Disable all types of L2 prefetches.
* ---------------------------------------------
*/
@@ -177,12 +165,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl cortex_a72_disable_dcache
-
- /* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
@@ -223,12 +205,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl cortex_a72_disable_dcache
-
- /* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 9cc6fdb..14f1ef8 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -13,16 +13,6 @@
cpu_reset_prologue cortex_a73
/* ---------------------------------------------
- * Disable L1 data cache
- * ---------------------------------------------
- */
-func cortex_a73_disable_dcache
- sysreg_bit_clear sctlr_el3, SCTLR_C_BIT
- isb
- ret
-endfunc cortex_a73_disable_dcache
-
- /* ---------------------------------------------
* Disable intra-cluster coherency
* ---------------------------------------------
*/
@@ -123,12 +113,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl cortex_a73_disable_dcache
-
- /* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
@@ -147,12 +131,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl cortex_a73_disable_dcache
-
- /* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index ca5ccf7..fc6d737 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -515,7 +515,7 @@
*/
sysreg_bit_set CORTEX_A76_CPUPWRCTLR_EL1, CORTEX_A76_CORE_PWRDN_EN_MASK
- apply_erratum cortex_a76, ERRATUM(2743102), ERRATA_A76_2743102, NO_GET_CPU_REV
+ apply_erratum cortex_a76, ERRATUM(2743102), ERRATA_A76_2743102
isb
ret
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index 82a20ec..09b25e2 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -169,7 +169,7 @@
sysreg_bit_set CORTEX_A77_CPUPWRCTLR_EL1, \
CORTEX_A77_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
- apply_erratum cortex_a77, ERRATUM(2743100), ERRATA_A77_2743100, NO_GET_CPU_REV
+ apply_erratum cortex_a77, ERRATUM(2743100), ERRATA_A77_2743100
isb
ret
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index b166823..7623446 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -191,7 +191,7 @@
func cortex_a78_core_pwr_dwn
sysreg_bit_set CORTEX_A78_CPUPWRCTLR_EL1, CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
- apply_erratum cortex_a78, ERRATUM(2772019), ERRATA_A78_2772019, NO_GET_CPU_REV
+ apply_erratum cortex_a78, ERRATUM(2772019), ERRATA_A78_2772019
isb
ret
diff --git a/lib/cpus/aarch64/cortex_a78c.S b/lib/cpus/aarch64/cortex_a78c.S
index 19d988e..0349cc5 100644
--- a/lib/cpus/aarch64/cortex_a78c.S
+++ b/lib/cpus/aarch64/cortex_a78c.S
@@ -129,7 +129,7 @@
*/
sysreg_bit_set CORTEX_A78C_CPUPWRCTLR_EL1, CORTEX_A78C_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
- apply_erratum cortex_a78c, ERRATUM(2772121), ERRATA_A78C_2772121, NO_GET_CPU_REV
+ apply_erratum cortex_a78c, ERRATUM(2772121), ERRATA_A78C_2772121
isb
ret
diff --git a/lib/cpus/aarch64/cortex_alto.S b/lib/cpus/aarch64/cortex_alto.S
index 69a630d..6d0e08b 100644
--- a/lib/cpus/aarch64/cortex_alto.S
+++ b/lib/cpus/aarch64/cortex_alto.S
@@ -41,6 +41,7 @@
sysreg_bit_set CORTEX_ALTO_IMP_CPUPWRCTLR_EL1, \
CORTEX_ALTO_IMP_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
isb
+ signal_pabandon_handled
ret
endfunc cortex_alto_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_gelas.S b/lib/cpus/aarch64/cortex_gelas.S
index 4cdec32..d322006 100644
--- a/lib/cpus/aarch64/cortex_gelas.S
+++ b/lib/cpus/aarch64/cortex_gelas.S
@@ -21,10 +21,6 @@
#error "Gelas supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
-#if FEAT_PABANDON == 0
-#error "Gelas must be compiled with FEAT_PABANDON enabled"
-#endif
-
#if ERRATA_SME_POWER_DOWN == 0
#error "Gelas needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly"
#endif
@@ -56,6 +52,7 @@
sysreg_bit_toggle CORTEX_GELAS_CPUPWRCTLR_EL1, \
CORTEX_GELAS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
isb
+ signal_pabandon_handled
ret
endfunc cortex_gelas_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index 910a6a9..a67553d 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -180,7 +180,7 @@
*/
sysreg_bit_set CORTEX_X2_CPUPWRCTLR_EL1, CORTEX_X2_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
- apply_erratum cortex_x2, ERRATUM(2768515), ERRATA_X2_2768515, NO_GET_CPU_REV
+ apply_erratum cortex_x2, ERRATUM(2768515), ERRATA_X2_2768515
isb
ret
endfunc cortex_x2_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S
index 36bb419..8879b54 100644
--- a/lib/cpus/aarch64/cortex_x3.S
+++ b/lib/cpus/aarch64/cortex_x3.S
@@ -155,7 +155,7 @@
* ----------------------------------------------------
*/
func cortex_x3_core_pwr_dwn
- apply_erratum cortex_x3, ERRATUM(2313909), ERRATA_X3_2313909, NO_GET_CPU_REV
+ apply_erratum cortex_x3, ERRATUM(2313909), ERRATA_X3_2313909
/* ---------------------------------------------------
* Enable CPU power down bit in power control register
* ---------------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S
index b1dc52c..72a2595 100644
--- a/lib/cpus/aarch64/cortex_x4.S
+++ b/lib/cpus/aarch64/cortex_x4.S
@@ -154,7 +154,7 @@
*/
sysreg_bit_set CORTEX_X4_CPUPWRCTLR_EL1, CORTEX_X4_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
- apply_erratum cortex_x4, ERRATUM(2740089), ERRATA_X4_2740089, NO_GET_CPU_REV
+ apply_erratum cortex_x4, ERRATUM(2740089), ERRATA_X4_2740089
isb
ret
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 105da5c..1b20d5c 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -14,44 +14,6 @@
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>
-#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
- /*
- * void prepare_cpu_pwr_dwn(unsigned int power_level)
- *
- * Prepare CPU power down function for all platforms. The function takes
- * a domain level to be powered down as its parameter. After the cpu_ops
- * pointer is retrieved from cpu_data, the handler for requested power
- * level is called.
- */
- .globl prepare_cpu_pwr_dwn
-func prepare_cpu_pwr_dwn
- /*
- * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
- * power down handler for the last power level
- */
- mov_imm x2, (CPU_MAX_PWR_DWN_OPS - 1)
- cmp x0, x2
- csel x2, x2, x0, hi
-
- mrs x1, tpidr_el3
- ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
-#if ENABLE_ASSERTIONS
- cmp x0, #0
- ASM_ASSERT(ne)
-#endif
-
- /* Get the appropriate power down handler */
- mov x1, #CPU_PWR_DWN_OPS
- add x1, x1, x2, lsl #3
- ldr x1, [x0, x1]
-#if ENABLE_ASSERTIONS
- cmp x1, #0
- ASM_ASSERT(ne)
-#endif
- br x1
-endfunc prepare_cpu_pwr_dwn
-
-
/*
* Initializes the cpu_ops_ptr if not already initialized
* in cpu_data. This can be called without a runtime stack, but may
@@ -70,7 +32,6 @@
1:
ret
endfunc init_cpu_ops
-#endif /* IMAGE_BL31 */
#if defined(IMAGE_BL31) && CRASH_REPORTING
/*
diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S
index 0a10eed..c59575c 100644
--- a/lib/cpus/aarch64/generic.S
+++ b/lib/cpus/aarch64/generic.S
@@ -13,28 +13,10 @@
cpu_reset_prologue generic
- /* ---------------------------------------------
- * Disable L1 data cache and unified L2 cache
- * ---------------------------------------------
- */
-func generic_disable_dcache
- mrs x1, sctlr_el3
- bic x1, x1, #SCTLR_C_BIT
- msr sctlr_el3, x1
- isb
- ret
-endfunc generic_disable_dcache
-
func generic_core_pwr_dwn
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl generic_disable_dcache
-
- /* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
@@ -48,12 +30,6 @@
mov x18, x30
/* ---------------------------------------------
- * Turn off caches.
- * ---------------------------------------------
- */
- bl generic_disable_dcache
-
- /* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index e821ecb..baeb83c 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -240,7 +240,7 @@
*/
sysreg_bit_set NEOVERSE_N1_CPUPWRCTLR_EL1, NEOVERSE_N1_CORE_PWRDN_EN_MASK
- apply_erratum neoverse_n1, ERRATUM(2743102), ERRATA_N1_2743102, NO_GET_CPU_REV
+ apply_erratum neoverse_n1, ERRATUM(2743102), ERRATA_N1_2743102
isb
ret
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index 7d9d7f1..2f053ac 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -274,7 +274,7 @@
cpu_reset_func_end neoverse_n2
func neoverse_n2_core_pwr_dwn
- apply_erratum neoverse_n2, ERRATUM(2009478), ERRATA_N2_2009478, NO_GET_CPU_REV
+ apply_erratum neoverse_n2, ERRATUM(2009478), ERRATA_N2_2009478
apply_erratum neoverse_n2, ERRATUM(2326639), ERRATA_N2_2326639, NO_GET_CPU_REV
/* ---------------------------------------------------
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
index f975be0..96b52aa 100644
--- a/lib/cpus/aarch64/neoverse_v1.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -253,7 +253,7 @@
* ---------------------------------------------
*/
sysreg_bit_set NEOVERSE_V1_CPUPWRCTLR_EL1, NEOVERSE_V1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
- apply_erratum neoverse_v1, ERRATUM(2743093), ERRATA_V1_2743093, NO_GET_CPU_REV
+ apply_erratum neoverse_v1, ERRATUM(2743093), ERRATA_V1_2743093
isb
ret
diff --git a/lib/cpus/aarch64/neoverse_v2.S b/lib/cpus/aarch64/neoverse_v2.S
index 9526b80..8224f93 100644
--- a/lib/cpus/aarch64/neoverse_v2.S
+++ b/lib/cpus/aarch64/neoverse_v2.S
@@ -113,7 +113,7 @@
* ---------------------------------------------------
*/
sysreg_bit_set NEOVERSE_V2_CPUPWRCTLR_EL1, NEOVERSE_V2_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
- apply_erratum neoverse_v2, ERRATUM(2801372), ERRATA_V2_2801372, NO_GET_CPU_REV
+ apply_erratum neoverse_v2, ERRATUM(2801372), ERRATA_V2_2801372
isb
ret
diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S
index a727379..7980066 100644
--- a/lib/cpus/aarch64/qemu_max.S
+++ b/lib/cpus/aarch64/qemu_max.S
@@ -12,15 +12,6 @@
func qemu_max_core_pwr_dwn
/* ---------------------------------------------
- * Disable the Data Cache.
- * ---------------------------------------------
- */
- mrs x1, sctlr_el3
- bic x1, x1, #SCTLR_C_BIT
- msr sctlr_el3, x1
- isb
-
- /* ---------------------------------------------
* Flush L1 cache to L2.
* ---------------------------------------------
*/
@@ -33,15 +24,6 @@
func qemu_max_cluster_pwr_dwn
/* ---------------------------------------------
- * Disable the Data Cache.
- * ---------------------------------------------
- */
- mrs x1, sctlr_el3
- bic x1, x1, #SCTLR_C_BIT
- msr sctlr_el3, x1
- isb
-
- /* ---------------------------------------------
* Flush all caches to PoC.
* ---------------------------------------------
*/
diff --git a/lib/cpus/aarch64/travis.S b/lib/cpus/aarch64/travis.S
index d53e46f..a959acb 100644
--- a/lib/cpus/aarch64/travis.S
+++ b/lib/cpus/aarch64/travis.S
@@ -21,10 +21,6 @@
#error "Travis supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
-#if FEAT_PABANDON == 0
-#error "Travis must be compiled with FEAT_PABANDON enabled"
-#endif
-
#if ERRATA_SME_POWER_DOWN == 0
#error "Travis needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly"
#endif
@@ -52,6 +48,7 @@
sysreg_bit_toggle TRAVIS_IMP_CPUPWRCTLR_EL1, \
TRAVIS_IMP_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
isb
+ signal_pabandon_handled
ret
endfunc travis_core_pwr_dwn
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index e188f74..2a6158a 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -150,21 +150,27 @@
manage_extensions_secure(ctx);
}
-#if ENABLE_RME
+#if ENABLE_RME && IMAGE_BL31
/******************************************************************************
* This function performs initializations that are specific to REALM state
* and updates the cpu context specified by 'ctx'.
+ *
+ * NOTE: any changes to this function must be verified by an RMMD maintainer.
*****************************************************************************/
static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
u_register_t scr_el3;
el3_state_t *state;
+ el2_sysregs_t *el2_ctx;
state = get_el3state_ctx(ctx);
scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+ el2_ctx = get_el2_sysregs_ctx(ctx);
scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;
+ write_el2_ctx_common(el2_ctx, spsr_el2, SPSR_EL2_REALM);
+
/* CSV2 version 2 and above */
if (is_feat_csv2_2_supported()) {
/* Enable access to the SCXTNUM_ELx registers. */
@@ -201,8 +207,22 @@
brbe_enable(ctx);
}
+ /*
+ * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for Non Secure world.
+ */
+ if (is_feat_sme_supported()) {
+ sme_enable(ctx);
+ }
+
+ if (is_feat_spe_supported()) {
+ spe_disable_realm(ctx);
+ }
+
+ if (is_feat_trbe_supported()) {
+ trbe_disable_realm(ctx);
+ }
}
-#endif /* ENABLE_RME */
+#endif /* ENABLE_RME && IMAGE_BL31 */
/******************************************************************************
* This function performs initializations that are specific to NON-SECURE state
@@ -309,12 +329,6 @@
/* Initialize EL2 context registers */
#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
-
- /*
- * Initialize SCTLR_EL2 context register with reset value.
- */
- write_el2_ctx_common(get_el2_sysregs_ctx(ctx), sctlr_el2, SCTLR_EL2_RES1);
-
if (is_feat_hcx_supported()) {
/*
* Initialize register HCRX_EL2 with its init value.
@@ -581,6 +595,13 @@
}
pmuv3_enable(ctx);
+
+#if CTX_INCLUDE_EL2_REGS
+ /*
+ * Initialize SCTLR_EL2 context register with reset value.
+ */
+ write_el2_ctx_common(get_el2_sysregs_ctx(ctx), sctlr_el2, SCTLR_EL2_RES1);
+#endif /* CTX_INCLUDE_EL2_REGS */
#endif /* IMAGE_BL31 */
/*
@@ -636,7 +657,7 @@
case SECURE:
setup_secure_context(ctx, ep);
break;
-#if ENABLE_RME
+#if ENABLE_RME && IMAGE_BL31
case REALM:
setup_realm_context(ctx, ep);
break;
@@ -859,20 +880,15 @@
debugv8p9_extended_bp_wp_enable(ctx);
}
- /*
- * SPE, TRBE, and BRBE have multi-field enables that affect which world
- * they apply to. Despite this, it is useful to ignore these for
- * simplicity in determining the feature's per world enablement status.
- * This is only possible when context is written per-world. Relied on
- * by SMCCC_ARCH_FEATURE_AVAILABILITY
- */
if (is_feat_spe_supported()) {
- spe_enable(ctx);
+ spe_enable_ns(ctx);
}
- if (!check_if_trbe_disable_affected_core()) {
- if (is_feat_trbe_supported()) {
- trbe_enable(ctx);
+ if (is_feat_trbe_supported()) {
+ if (check_if_trbe_disable_affected_core()) {
+ trbe_disable_ns(ctx);
+ } else {
+ trbe_enable_ns(ctx);
}
}
@@ -958,18 +974,12 @@
}
}
- /*
- * SPE and TRBE cannot be fully disabled from EL3 registers alone, only
- * sysreg access can. In case the EL1 controls leave them active on
- * context switch, we want the owning security state to be NS so Secure
- * can't be DOSed.
- */
if (is_feat_spe_supported()) {
- spe_disable(ctx);
+ spe_disable_secure(ctx);
}
if (is_feat_trbe_supported()) {
- trbe_disable(ctx);
+ trbe_disable_secure(ctx);
}
#endif /* IMAGE_BL31 */
}
diff --git a/lib/extensions/brbe/brbe.c b/lib/extensions/brbe/brbe.c
index f951654..b34fc5d 100644
--- a/lib/extensions/brbe/brbe.c
+++ b/lib/extensions/brbe/brbe.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -17,7 +17,7 @@
/*
* MDCR_EL3.SBRBE = 0b01
* Allows BRBE usage in non-secure world and prohibited in
- * secure world.
+ * secure world. This is relied on by SMCCC_ARCH_FEATURE_AVAILABILITY.
*
* MDCR_EL3.{E3BREW, E3BREC} = 0b00
* Branch recording at EL3 is disabled
diff --git a/lib/extensions/spe/spe.c b/lib/extensions/spe/spe.c
index e499486..ddd8516 100644
--- a/lib/extensions/spe/spe.c
+++ b/lib/extensions/spe/spe.c
@@ -13,47 +13,78 @@
#include <plat/common/platform.h>
-void spe_enable(cpu_context_t *ctx)
+/*
+ * SPE is an unusual feature. Its enable is split into two:
+ * - (NSPBE, NSPB[1]) - the security state bits - determines which security
+ *   state owns the profiling buffer.
+ * - NSPB[0] - the enable bit - determines if the security state that owns the
+ *   buffer may access SPE registers.
+ *
+ * There is a secondary id register PMBIDR_EL1 that is more granular than
+ * ID_AA64DFR0_EL1. When a security state owns the buffer, PMBIDR_EL1.P will
+ * report that SPE programming is allowed. This means that the usual assumption
+ * that leaving all bits to a default of zero will disable the feature may not
+ * work correctly. To correctly disable SPE, the current security state must NOT
+ * own the buffer, irrespective of the enable bit. Then, to play nicely with
+ * SMCCC_ARCH_FEATURE_AVAILABILITY, the enable bit should correspond to the
+ * enable status. The feature is architected this way to allow for lazy context
+ * switching of the buffer - a world can be made owner of the buffer (with
+ * PMBIDR_EL1.P reporting full access) without giving it access to the registers
+ * (by trapping to EL3). Then context switching can be deferred until a world
+ * tries to use SPE at which point access can be given and the trapping
+ * instruction repeated.
+ *
+ * This can be simplified to the following rules:
+ * 1. To enable SPE for world X:
+ * * world X owns the buffer ((NSPBE, NSPB[1]) == SCR_EL3.{NSE, NS})
+ * * trapping disabled (NSPB[0] == 1)
+ * 2. To disable SPE for world X:
+ * * world X does not own the buffer ((NSPBE, NSPB[1]) != SCR_EL3.{NSE, NS})
+ * * trapping enabled (NSPB[0] == 0)
+ */
+
+/*
+ * MDCR_EL3.EnPMSN (ARM v8.7) and MDCR_EL3.EnPMS3: Do not trap access to
+ * PMSNEVFR_EL1 or PMSDSFR_EL1 register at NS-EL1 or NS-EL2 to EL3 if
+ * FEAT_SPEv1p2 or FEAT_SPE_FDS are implemented. Setting these bits to 1 doesn't
+ * have any effect on it when the features aren't implemented.
+ */
+void spe_enable_ns(cpu_context_t *ctx)
{
el3_state_t *state = get_el3state_ctx(ctx);
u_register_t mdcr_el3_val = read_ctx_reg(state, CTX_MDCR_EL3);
- /*
- * MDCR_EL3.NSPB (ARM v8.2): SPE enabled in Non-secure state
- * and disabled in secure state. Accesses to SPE registers at
- * S-EL1 generate trap exceptions to EL3.
- *
- * MDCR_EL3.NSPBE: Profiling Buffer uses Non-secure Virtual Addresses.
- * When FEAT_RME is not implemented, this field is RES0.
- *
- * MDCR_EL3.EnPMSN (ARM v8.7) and MDCR_EL3.EnPMS3: Do not trap access to
- * PMSNEVFR_EL1 or PMSDSFR_EL1 register at NS-EL1 or NS-EL2 to EL3 if FEAT_SPEv1p2
- * or FEAT_SPE_FDS are implemented. Setting these bits to 1 doesn't have any
- * effect on it when the features aren't implemented.
- */
- mdcr_el3_val |= MDCR_NSPB(MDCR_NSPB_EL1) | MDCR_EnPMSN_BIT | MDCR_EnPMS3_BIT;
+ mdcr_el3_val |= MDCR_NSPB_EN_BIT | MDCR_NSPB_SS_BIT | MDCR_EnPMSN_BIT | MDCR_EnPMS3_BIT;
mdcr_el3_val &= ~(MDCR_NSPBE_BIT);
+
write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3_val);
}
-void spe_disable(cpu_context_t *ctx)
+/*
+ * MDCR_EL3.EnPMSN (ARM v8.7) and MDCR_EL3.EnPMS3: Clear the bits to trap access
+ * of PMSNEVFR_EL1 and PMSDSFR_EL1 from EL2/EL1 to EL3.
+ */
+static void spe_disable_others(cpu_context_t *ctx)
{
el3_state_t *state = get_el3state_ctx(ctx);
u_register_t mdcr_el3_val = read_ctx_reg(state, CTX_MDCR_EL3);
- /*
- * MDCR_EL3.{NSPB,NSPBE} = 0b00, 0b0
- * Disable access of profiling buffer control registers from lower ELs
- * in any security state. Secure state owns the buffer.
- *
- * MDCR_EL3.EnPMSN (ARM v8.7) and MDCR_EL3.EnPMS3: Clear the bits to trap access
- * of PMSNEVFR_EL1 and PMSDSFR_EL1 from EL2/EL1 to EL3.
- */
- mdcr_el3_val &= ~(MDCR_NSPB(MDCR_NSPB_EL1) | MDCR_NSPBE_BIT | MDCR_EnPMSN_BIT |
+ mdcr_el3_val |= MDCR_NSPB_SS_BIT;
+ mdcr_el3_val &= ~(MDCR_NSPB_EN_BIT | MDCR_NSPBE_BIT | MDCR_EnPMSN_BIT |
MDCR_EnPMS3_BIT);
write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3_val);
}
+void spe_disable_secure(cpu_context_t *ctx)
+{
+ spe_disable_others(ctx);
+}
+
+void spe_disable_realm(cpu_context_t *ctx)
+{
+ spe_disable_others(ctx);
+}
+
void spe_init_el2_unused(void)
{
uint64_t v;
diff --git a/lib/extensions/trbe/trbe.c b/lib/extensions/trbe/trbe.c
index 24d88ae..eeae0a7 100644
--- a/lib/extensions/trbe/trbe.c
+++ b/lib/extensions/trbe/trbe.c
@@ -9,41 +9,82 @@
#include <arch_helpers.h>
#include <lib/extensions/trbe.h>
-void trbe_enable(cpu_context_t *ctx)
+
+/*
+ * TRBE is an unusual feature. Its enable is split into two:
+ * - (NSTBE, NSTB[1]) - the security state bits - determines which security
+ *   state owns the trace buffer.
+ * - NSTB[0] - the enable bit - determines if the security state that owns the
+ *   buffer may access TRBE registers.
+ *
+ * There is a secondary id register TRBIDR_EL1 that is more granular than
+ * ID_AA64DFR0_EL1. When a security state owns the buffer, TRBIDR_EL1.P will
+ * report that TRBE programming is allowed. This means that the usual assumption
+ * that leaving all bits to a default of zero will disable the feature may not
+ * work correctly. To correctly disable TRBE, the current security state must NOT
+ * own the buffer, irrespective of the enable bit. Then, to play nicely with
+ * SMCCC_ARCH_FEATURE_AVAILABILITY, the enable bit should correspond to the
+ * enable status. The feature is architected this way to allow for lazy context
+ * switching of the buffer - a world can be made owner of the buffer (with
+ * TRBIDR_EL1.P reporting full access) without giving it access to the registers
+ * (by trapping to EL3). Then context switching can be deferred until a world
+ * tries to use TRBE at which point access can be given and the trapping
+ * instruction repeated.
+ *
+ * This can be simplified to the following rules:
+ * 1. To enable TRBE for world X:
+ * * world X owns the buffer ((NSTBE, NSTB[1]) == SCR_EL3.{NSE, NS})
+ * * trapping disabled (NSTB[0] == 1)
+ * 2. To disable TRBE for world X:
+ * * world X does not own the buffer ((NSTBE, NSTB[1]) != SCR_EL3.{NSE, NS})
+ * * trapping enabled (NSTB[0] == 0)
+ */
+void trbe_enable_ns(cpu_context_t *ctx)
{
el3_state_t *state = get_el3state_ctx(ctx);
u_register_t mdcr_el3_val = read_ctx_reg(state, CTX_MDCR_EL3);
- /*
- * MDCR_EL3.NSTBE = 0b0
- * Trace Buffer owning Security state is Non-secure state. If FEAT_RME
- * is not implemented, this field is RES0.
- *
- * MDCR_EL3.NSTB = 0b11
- * Allow access of trace buffer control registers from NS-EL1 and
- * NS-EL2, tracing is prohibited in Secure and Realm state (if
- * implemented).
- */
- mdcr_el3_val |= MDCR_NSTB(MDCR_NSTB_EL1);
+ mdcr_el3_val |= MDCR_NSTB_EN_BIT | MDCR_NSTB_SS_BIT;
mdcr_el3_val &= ~(MDCR_NSTBE_BIT);
+
write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3_val);
}
-void trbe_disable(cpu_context_t *ctx)
+static void trbe_disable_all(cpu_context_t *ctx, bool ns)
{
el3_state_t *state = get_el3state_ctx(ctx);
u_register_t mdcr_el3_val = read_ctx_reg(state, CTX_MDCR_EL3);
- /*
- * MDCR_EL3.{NSTBE,NSTB} = 0b0, 0b00
- * Disable access of trace buffer control registers from lower ELs in
- * any security state. Secure state owns the buffer.
- */
- mdcr_el3_val &= ~(MDCR_NSTB(MDCR_NSTB_EL1));
- mdcr_el3_val &= ~(MDCR_NSTBE_BIT);
+ mdcr_el3_val &= ~MDCR_NSTB_EN_BIT;
+ mdcr_el3_val &= ~MDCR_NSTBE_BIT;
+
+ /* make NS owner, except when NS is running */
+ if (ns) {
+ mdcr_el3_val &= ~MDCR_NSTB_SS_BIT;
+ } else {
+ mdcr_el3_val |= MDCR_NSTB_SS_BIT;
+ }
+
write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3_val);
}
+
+void trbe_disable_ns(cpu_context_t *ctx)
+{
+ trbe_disable_all(ctx, true);
+}
+
+void trbe_disable_secure(cpu_context_t *ctx)
+{
+ trbe_disable_all(ctx, false);
+}
+
+void trbe_disable_realm(cpu_context_t *ctx)
+{
+ trbe_disable_all(ctx, false);
+}
+
+
void trbe_init_el2_unused(void)
{
/*
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
index 4e1013c..493715a 100644
--- a/lib/psci/aarch32/psci_helpers.S
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,44 +10,47 @@
.globl psci_do_pwrdown_cache_maintenance
.globl psci_do_pwrup_cache_maintenance
- .globl psci_power_down_wfi
/* -----------------------------------------------------------------------
- * void psci_do_pwrdown_cache_maintenance(unsigned int power level);
+ * void psci_do_pwrdown_cache_maintenance(void);
*
- * This function performs cache maintenance for the specified power
- * level. The levels of cache affected are determined by the power
- * level which is passed as the argument i.e. level 0 results
- * in a flush of the L1 cache. Both the L1 and L2 caches are flushed
- * for a higher power level.
- *
- * Additionally, this function also ensures that stack memory is correctly
- * flushed out to avoid coherency issues due to a change in its memory
- * attributes after the data cache is disabled.
+ * This function turns off data caches and also ensures that stack memory
+ * is correctly flushed out to avoid coherency issues due to a change in
+ * its memory attributes.
* -----------------------------------------------------------------------
*/
func psci_do_pwrdown_cache_maintenance
push {r4, lr}
+ bl plat_get_my_stack
- /* ----------------------------------------------
- * Turn OFF cache and do stack maintenance
- * prior to cpu operations . This sequence is
- * different from AArch64 because in AArch32 the
- * assembler routines for cpu operations utilize
- * the stack whereas in AArch64 it doesn't.
- * ----------------------------------------------
- */
- mov r4, r0
- bl do_stack_maintenance
+ /* Turn off the D-cache */
+ ldcopr r1, SCTLR
+ bic r1, #SCTLR_C_BIT
+ stcopr r1, SCTLR
+ isb
/* ---------------------------------------------
- * Invoke CPU-specifc power down operations for
- * the appropriate level
+ * Calculate and store the size of the used
+ * stack memory in r1.
* ---------------------------------------------
*/
- mov r0, r4
- pop {r4, lr}
- b prepare_cpu_pwr_dwn
+ mov r4, r0
+ mov r1, sp
+ sub r1, r0, r1
+ mov r0, sp
+ bl flush_dcache_range
+
+ /* ---------------------------------------------
+ * Calculate and store the size of the unused
+ * stack memory in r1. Calculate and store the
+ * stack base address in r0.
+ * ---------------------------------------------
+ */
+ sub r0, r4, #PLATFORM_STACK_SIZE
+ sub r1, sp, r0
+ bl inv_dcache_range
+
+ pop {r4, pc}
endfunc psci_do_pwrdown_cache_maintenance
@@ -93,57 +96,3 @@
pop {r12, pc}
endfunc psci_do_pwrup_cache_maintenance
-
- /* ---------------------------------------------
- * void do_stack_maintenance(void)
- * Do stack maintenance by flushing the used
- * stack to the main memory and invalidating the
- * remainder.
- * ---------------------------------------------
- */
-func do_stack_maintenance
- push {r4, lr}
- bl plat_get_my_stack
-
- /* Turn off the D-cache */
- ldcopr r1, SCTLR
- bic r1, #SCTLR_C_BIT
- stcopr r1, SCTLR
- isb
-
- /* ---------------------------------------------
- * Calculate and store the size of the used
- * stack memory in r1.
- * ---------------------------------------------
- */
- mov r4, r0
- mov r1, sp
- sub r1, r0, r1
- mov r0, sp
- bl flush_dcache_range
-
- /* ---------------------------------------------
- * Calculate and store the size of the unused
- * stack memory in r1. Calculate and store the
- * stack base address in r0.
- * ---------------------------------------------
- */
- sub r0, r4, #PLATFORM_STACK_SIZE
- sub r1, sp, r0
- bl inv_dcache_range
-
- pop {r4, pc}
-endfunc do_stack_maintenance
-
-/* -----------------------------------------------------------------------
- * This function is called to indicate to the power controller that it
- * is safe to power down this cpu. It should not exit the wfi and will
- * be released from reset upon power up.
- * -----------------------------------------------------------------------
- */
-func psci_power_down_wfi
- dsb sy // ensure write buffer empty
-1:
- wfi
- b 1b
-endfunc psci_power_down_wfi
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
index b297f9b..ce8adc2 100644
--- a/lib/psci/aarch64/psci_helpers.S
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -12,32 +12,24 @@
.globl psci_do_pwrdown_cache_maintenance
.globl psci_do_pwrup_cache_maintenance
- .globl psci_power_down_wfi
/* -----------------------------------------------------------------------
- * void psci_do_pwrdown_cache_maintenance(unsigned int power level);
+ * void psci_do_pwrdown_cache_maintenance(void);
*
- * This function performs cache maintenance for the specified power
- * level. The levels of cache affected are determined by the power
- * level which is passed as the argument i.e. level 0 results
- * in a flush of the L1 cache. Both the L1 and L2 caches are flushed
- * for a higher power level.
- *
- * Additionally, this function also ensures that stack memory is correctly
- * flushed out to avoid coherency issues due to a change in its memory
- * attributes after the data cache is disabled.
+ * This function turns off data caches and also ensures that stack memory
+ * is correctly flushed out to avoid coherency issues due to a change in
+ * its memory attributes.
* -----------------------------------------------------------------------
*/
func psci_do_pwrdown_cache_maintenance
stp x29, x30, [sp,#-16]!
stp x19, x20, [sp,#-16]!
- /* ---------------------------------------------
- * Invoke CPU-specific power down operations for
- * the appropriate level
- * ---------------------------------------------
- */
- bl prepare_cpu_pwr_dwn
+ /* Disable L1 data cache and unified L2 cache */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
/* ---------------------------------------------
* Do stack maintenance by flushing the used
@@ -116,26 +108,3 @@
ldp x29, x30, [sp], #16
ret
endfunc psci_do_pwrup_cache_maintenance
-
-/* -----------------------------------------------------------------------
- * void psci_power_down_wfi(void); This function is called to indicate to the
- * power controller that it is safe to power down this cpu. It may exit if the
- * request was denied and reset did not occur
- * -----------------------------------------------------------------------
- */
-func psci_power_down_wfi
- apply_erratum cortex_a510, ERRATUM(2684597), ERRATA_A510_2684597
-
- dsb sy // ensure write buffer empty
- wfi
-
- /*
- * in case the WFI wasn't terminal, we have to undo errata mitigations.
- * These will be smart enough to handle being called the same way
- */
- apply_erratum cortex_a710, ERRATUM(2291219), ERRATA_A710_2291219
- apply_erratum cortex_x3, ERRATUM(2313909), ERRATA_X3_2313909, NO_GET_CPU_REV
- apply_erratum neoverse_n2, ERRATUM(2326639), ERRATA_N2_2326639, NO_GET_CPU_REV
-
- ret
-endfunc psci_power_down_wfi
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 1021ef6..bc1bad0 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -14,6 +14,7 @@
#include <common/debug.h>
#include <context.h>
#include <drivers/delay_timer.h>
+#include <lib/cpus/cpu_ops.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/spe.h>
#include <lib/pmf/pmf.h>
@@ -1003,6 +1004,11 @@
unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+#if FEATURE_DETECTION
+ /* Detect if features enabled during compilation are supported by PE. */
+ detect_arch_features(cpu_idx);
+#endif /* FEATURE_DETECTION */
+
/* Init registers that never change for the lifetime of TF-A */
cm_manage_extensions_el3(cpu_idx);
@@ -1055,7 +1061,7 @@
unsigned int max_off_lvl = psci_find_max_off_lvl(&state_info);
assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
- psci_cpu_suspend_to_powerdown_finish(cpu_idx, max_off_lvl, &state_info);
+ psci_cpu_suspend_to_powerdown_finish(cpu_idx, max_off_lvl, &state_info, false);
}
/*
@@ -1195,6 +1201,44 @@
return (n_valid > 1U) ? 1 : 0;
}
+static u_register_t call_cpu_pwr_dwn(unsigned int power_level)
+{
+ struct cpu_ops *ops = get_cpu_data(cpu_ops_ptr);
+
+ /* Call the last available power down handler */
+ if (power_level > CPU_MAX_PWR_DWN_OPS - 1) {
+ power_level = CPU_MAX_PWR_DWN_OPS - 1;
+ }
+
+ assert(ops != NULL);
+ assert(ops->pwr_dwn_ops[power_level] != NULL);
+
+ return ops->pwr_dwn_ops[power_level]();
+}
+
+static void prepare_cpu_pwr_dwn(unsigned int power_level)
+{
+ /* ignore the return, all cpus should behave the same */
+ (void)call_cpu_pwr_dwn(power_level);
+}
+
+static void prepare_cpu_pwr_up(unsigned int power_level)
+{
+ /*
+ * Call the pwr_dwn cpu hook again, indicating that an abandon happened.
+ * The cpu driver is expected to clean up. We ask it to return
+ * PABANDON_ACK to indicate that it has handled this. This is a
+ * heuristic: the value has been chosen such that an unported CPU is
+ * extremely unlikely to return this value.
+ */
+ u_register_t ret = call_cpu_pwr_dwn(power_level);
+
+ /* unreachable on AArch32 so cast down to calm the compiler */
+ if (ret != (u_register_t) PABANDON_ACK) {
+ panic();
+ }
+}
+
/*******************************************************************************
* Initiate power down sequence, by calling power down operations registered for
* this CPU.
@@ -1212,26 +1256,24 @@
PMF_CACHE_MAINT);
#endif
-#if HW_ASSISTED_COHERENCY
+#if !HW_ASSISTED_COHERENCY
/*
- * With hardware-assisted coherency, the CPU drivers only initiate the
- * power down sequence, without performing cache-maintenance operations
- * in software. Data caches enabled both before and after this call.
- */
- prepare_cpu_pwr_dwn(power_level);
-#else
- /*
- * Without hardware-assisted coherency, the CPU drivers disable data
- * caches, then perform cache-maintenance operations in software.
+ * Disable data caching and handle the stack's cache maintenance.
*
- * This also calls prepare_cpu_pwr_dwn() to initiate power down
- * sequence, but that function will return with data caches disabled.
- * We must ensure that the stack memory is flushed out to memory before
- * we start popping from it again.
+ * If the core can't automatically exit coherency, the cpu driver needs
+ * to flush caches and exit coherency. We can't do this with data caches
+ * enabled. The cpu driver will decide which caches to flush based on
+ * the power level.
+ *
+ * If automatic coherency management is possible, we can keep data
+ * caches on until the very end and let hardware do cache maintenance.
*/
- psci_do_pwrdown_cache_maintenance(power_level);
+ psci_do_pwrdown_cache_maintenance();
#endif
+ /* Initiate the power down sequence by calling into the cpu driver. */
+ prepare_cpu_pwr_dwn(power_level);
+
#if ENABLE_RUNTIME_INSTRUMENTATION
PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
RT_INSTR_EXIT_CFLUSH,
@@ -1257,6 +1299,9 @@
}
#endif /* ERRATA_SME_POWER_DOWN */
+ /* ensure write buffer empty */
+ dsbsy();
+
/*
* Execute a wfi which, in most cases, will allow the power controller
* to physically power down this cpu. Under some circumstances that may
@@ -1264,7 +1309,7 @@
* power down.
*/
for (int i = 0; i < 32; i++)
- psci_power_down_wfi();
+ wfi();
/* Wake up wasn't transient. System is probably in a bad state. */
ERROR("Could not power off CPU.\n");
@@ -1278,31 +1323,30 @@
void psci_pwrdown_cpu_end_wakeup(unsigned int power_level)
{
+ /* ensure write buffer empty */
+ dsbsy();
+
/*
- * Usually, will be terminal. In some circumstances the powerdown will
- * be denied and we'll need to unwind
+ * Turn the core off. Usually, will be terminal. In some circumstances
+ * the powerdown will be denied and we'll need to unwind.
*/
- psci_power_down_wfi();
+ wfi();
/*
* Waking up does not require hardware-assisted coherency, but that is
- * the case for every core that can wake up. Untangling the cache
- * coherency code from powerdown is a non-trivial effort which isn't
- * needed for our purposes.
+	 * the case for every core that can wake up. A wakeup here can happen
+	 * either because of errata mitigations or a powerdown abandon (pabandon).
*/
-#if !FEAT_PABANDON
- ERROR("Systems without FEAT_PABANDON shouldn't wake up.\n");
+#if !defined(__aarch64__) || !HW_ASSISTED_COHERENCY
+	ERROR("Systems without hardware-assisted coherency shouldn't wake up.\n");
panic();
-#else /* FEAT_PABANDON */
-
+#endif
/*
* Begin unwinding. Everything can be shared with CPU_ON and co later,
* except the CPU specific bit. Cores that have hardware-assisted
- * coherency don't have much to do so just calling the hook again is
- * the simplest way to achieve this
+ * coherency should be able to handle this.
*/
- prepare_cpu_pwr_dwn(power_level);
-#endif /* FEAT_PABANDON */
+ prepare_cpu_pwr_up(power_level);
}
/*******************************************************************************
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
index f3f5a5c..446f23d 100644
--- a/lib/psci/psci_private.h
+++ b/lib/psci/psci_private.h
@@ -323,13 +323,6 @@
bool psci_is_last_on_cpu(unsigned int my_idx);
int psci_spd_migrate_info(u_register_t *mpidr);
-/*
- * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
- * available. Otherwise, this needs post-call stack maintenance, which is
- * handled in assembly.
- */
-void prepare_cpu_pwr_dwn(unsigned int power_level);
-
/* This function applies various CPU errata during power down. */
void apply_cpu_pwr_dwn_errata(void);
@@ -348,10 +341,13 @@
psci_power_state_t *state_info,
unsigned int is_power_down_state);
-void psci_cpu_suspend_to_powerdown_finish(unsigned int cpu_idx, unsigned int max_off_lvl, const psci_power_state_t *state_info);
+void psci_cpu_suspend_to_powerdown_finish(unsigned int cpu_idx,
+ unsigned int max_off_lvl,
+ const psci_power_state_t *state_info,
+ bool abandon);
/* Private exported functions from psci_helpers.S */
-void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
+void psci_do_pwrdown_cache_maintenance(void);
void psci_do_pwrup_cache_maintenance(void);
/* Private exported functions from psci_system_off.c */
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index 73b9a67..c04c547 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -184,17 +184,6 @@
#endif
if (is_power_down_state != 0U) {
- /*
- * WHen CTX_INCLUDE_EL2_REGS is usnet, we're probably runnig
- * with some SPD that assumes the core is going off so it
- * doesn't bother saving NS's context. Do that here until we
- * figure out a way to make this coherent.
- */
-#if FEAT_PABANDON
-#if !CTX_INCLUDE_EL2_REGS
- cm_el1_sysregs_context_save(NON_SECURE);
-#endif
-#endif
max_off_lvl = psci_find_max_off_lvl(state_info);
psci_suspend_to_pwrdown_start(idx, end_pwrlvl, end_pwrlvl, state_info);
}
@@ -274,13 +263,7 @@
* the system back to a usable state.
*/
if (is_power_down_state != 0U) {
-#if FEAT_PABANDON
- psci_cpu_suspend_to_powerdown_finish(idx, max_off_lvl, state_info);
-
-#if !CTX_INCLUDE_EL2_REGS
- cm_el1_sysregs_context_restore(NON_SECURE);
-#endif
-#endif
+ psci_cpu_suspend_to_powerdown_finish(idx, max_off_lvl, state_info, true);
} else {
psci_cpu_suspend_to_standby_finish(end_pwrlvl, state_info);
}
@@ -307,7 +290,7 @@
* are called by the common finisher routine in psci_common.c. The `state_info`
* is the psci_power_state from which this CPU has woken up from.
******************************************************************************/
-void psci_cpu_suspend_to_powerdown_finish(unsigned int cpu_idx, unsigned int max_off_lvl, const psci_power_state_t *state_info)
+void psci_cpu_suspend_to_powerdown_finish(unsigned int cpu_idx, unsigned int max_off_lvl, const psci_power_state_t *state_info, bool abandon)
{
unsigned int counter_freq;
@@ -335,9 +318,11 @@
gic_cpuif_enable(cpu_idx);
#endif /* USE_GIC_DRIVER */
- /* Re-init the cntfrq_el0 register */
- counter_freq = plat_get_syscnt_freq2();
- write_cntfrq_el0(counter_freq);
+ if (!abandon) {
+ /* Re-init the cntfrq_el0 register */
+ counter_freq = plat_get_syscnt_freq2();
+ write_cntfrq_el0(counter_freq);
+ }
/*
* Call the cpu suspend finish handler registered by the Secure Payload
@@ -345,7 +330,7 @@
* error, it's expected to assert within
*/
if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) {
- psci_spd_pm->svc_suspend_finish(max_off_lvl);
+ psci_spd_pm->svc_suspend_finish(max_off_lvl, abandon);
}
/* This loses its meaning when not suspending, reset so it's correct for OFF */
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
index ae9244a..81e53ee 100644
--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -112,7 +112,7 @@
int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
- return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
+ return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr, NULL);
}
int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index a3b913c..94b3347 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -487,10 +487,10 @@
int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
- uint32_t *attr)
+ uint32_t *attr, unsigned int *table_level)
{
return xlat_get_mem_attributes_internal(ctx, base_va, attr,
- NULL, NULL, NULL);
+ NULL, NULL, table_level);
}
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 516d9b4..1ddcd6f 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -88,9 +88,6 @@
# Enable the Maximum Power Mitigation Mechanism on supporting cores.
ENABLE_MPMM := 0
-# Enable support for powerdown abandons
-FEAT_PABANDON := 0
-
# Flag to Enable Position Independant support (PIE)
ENABLE_PIE := 0
diff --git a/plat/allwinner/common/sunxi_native_pm.c b/plat/allwinner/common/sunxi_native_pm.c
index 558b0bb..28abd3c 100644
--- a/plat/allwinner/common/sunxi_native_pm.c
+++ b/plat/allwinner/common/sunxi_native_pm.c
@@ -38,7 +38,7 @@
gicv2_cpuif_enable();
}
-static void __dead2 sunxi_system_off(void)
+static void sunxi_system_off(void)
{
gicv2_cpuif_disable();
@@ -48,9 +48,6 @@
/* Turn off all CPUs */
sunxi_cpu_power_off_others();
sunxi_cpu_power_off_self();
- psci_power_down_wfi();
- /* should never reach here */
- panic();
}
static void __dead2 sunxi_system_reset(void)
diff --git a/plat/allwinner/common/sunxi_scpi_pm.c b/plat/allwinner/common/sunxi_scpi_pm.c
index 8870a71..2341c09 100644
--- a/plat/allwinner/common/sunxi_scpi_pm.c
+++ b/plat/allwinner/common/sunxi_scpi_pm.c
@@ -95,7 +95,7 @@
}
}
-static void __dead2 sunxi_system_off(void)
+static void sunxi_system_off(void)
{
uint32_t ret;
@@ -106,13 +106,9 @@
if (ret != SCP_OK) {
ERROR("PSCI: SCPI %s failed: %d\n", "shutdown", ret);
}
-
- psci_power_down_wfi();
- /* should never reach here */
- panic();
}
-static void __dead2 sunxi_system_reset(void)
+static void sunxi_system_reset(void)
{
uint32_t ret;
@@ -123,10 +119,6 @@
if (ret != SCP_OK) {
ERROR("PSCI: SCPI %s failed: %d\n", "reboot", ret);
}
-
- psci_power_down_wfi();
- /* should never reach here */
- panic();
}
static int sunxi_system_reset2(int is_vendor, int reset_type, u_register_t cookie)
@@ -145,14 +137,8 @@
return PSCI_E_INVALID_PARAMS;
}
- psci_power_down_wfi();
- /* should never reach here */
- panic();
-
/*
- * Should not reach here.
- * However sunxi_system_reset2 has to return some value
- * according to PSCI v1.1 spec.
+	 * Return PSCI_E_SUCCESS and let the PSCI library continue the core powerdown.
*/
return PSCI_E_SUCCESS;
}
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 8e16829..fd7e386 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -243,7 +243,6 @@
#Build AArch64-only CPUs with no FVP model yet.
ifeq (${BUILD_CPUS_WITH_NO_FVP_MODEL},1)
# travis/gelas need these
- FEAT_PABANDON := 1
ERRATA_SME_POWER_DOWN := 1
FVP_CPU_LIBS += lib/cpus/aarch64/cortex_gelas.S \
lib/cpus/aarch64/nevis.S \
diff --git a/plat/arm/board/tc/platform.mk b/plat/arm/board/tc/platform.mk
index b7edf28..4a3dfff 100644
--- a/plat/arm/board/tc/platform.mk
+++ b/plat/arm/board/tc/platform.mk
@@ -123,7 +123,6 @@
# CPU libraries for TARGET_PLATFORM=4
ifeq (${TARGET_PLATFORM}, 4)
-FEAT_PABANDON := 1
# prevent CME related wakups
ERRATA_SME_POWER_DOWN := 1
TC_CPU_SOURCES += lib/cpus/aarch64/cortex_gelas.S \
diff --git a/plat/marvell/armada/a8k/common/plat_pm.c b/plat/marvell/armada/a8k/common/plat_pm.c
index ae3ee37..b171e92 100644
--- a/plat/marvell/armada/a8k/common/plat_pm.c
+++ b/plat/marvell/armada/a8k/common/plat_pm.c
@@ -733,16 +733,16 @@
}
static void
-__dead2 a8k_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
+a8k_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
{
struct power_off_method *pm_cfg;
unsigned int srcmd;
unsigned int sdram_reg;
register_t gpio_data = 0, gpio_addr = 0;
+ /* let PSCI lib turn the core off */
if (is_pm_fw_running()) {
- psci_power_down_wfi();
- panic();
+ return;
}
pm_cfg = (struct power_off_method *)plat_marvell_get_pm_cfg();
diff --git a/plat/mediatek/drivers/mtcmos/mt8196/mtcmos.c b/plat/mediatek/drivers/mtcmos/mtcmos.c
similarity index 97%
rename from plat/mediatek/drivers/mtcmos/mt8196/mtcmos.c
rename to plat/mediatek/drivers/mtcmos/mtcmos.c
index 1e82bb4..acd41ee 100644
--- a/plat/mediatek/drivers/mtcmos/mt8196/mtcmos.c
+++ b/plat/mediatek/drivers/mtcmos/mtcmos.c
@@ -91,7 +91,7 @@
return 0;
}
-int spm_mtcmos_ctrl(enum mtcmos_state state, uintptr_t reg, uint32_t mask)
+static int spm_mtcmos_ctrl(enum mtcmos_state state, uintptr_t reg, uint32_t mask)
{
int ret = 0;
diff --git a/plat/mediatek/drivers/mtcmos/mt8196/mtcmos.h b/plat/mediatek/drivers/mtcmos/mtcmos.h
similarity index 62%
rename from plat/mediatek/drivers/mtcmos/mt8196/mtcmos.h
rename to plat/mediatek/drivers/mtcmos/mtcmos.h
index 39902bc..925bd09 100644
--- a/plat/mediatek/drivers/mtcmos/mt8196/mtcmos.h
+++ b/plat/mediatek/drivers/mtcmos/mtcmos.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef PLAT_MEDIATEK_DRIVERS_MTCMOS_MT8196_MTCMOS_H_
-#define PLAT_MEDIATEK_DRIVERS_MTCMOS_MT8196_MTCMOS_H_
+#ifndef MTCMOS_H
+#define MTCMOS_H
enum mtcmos_state {
STA_POWER_DOWN,
@@ -15,4 +15,4 @@
int spm_mtcmos_ctrl_ufs0(enum mtcmos_state state);
int spm_mtcmos_ctrl_ufs0_phy(enum mtcmos_state state);
-#endif /* PLAT_MEDIATEK_DRIVERS_MTCMOS_MT8196_MTCMOS_H_ */
+#endif /* MTCMOS_H */
diff --git a/plat/mediatek/drivers/mtcmos/rules.mk b/plat/mediatek/drivers/mtcmos/rules.mk
index a8f1df2..c1964a7 100644
--- a/plat/mediatek/drivers/mtcmos/rules.mk
+++ b/plat/mediatek/drivers/mtcmos/rules.mk
@@ -6,8 +6,8 @@
LOCAL_DIR := $(call GET_LOCAL_DIR)
MODULE := mtcmos
-LOCAL_SRCS-y := $(LOCAL_DIR)/${MTK_SOC}/mtcmos.c
+LOCAL_SRCS-y := $(LOCAL_DIR)/mtcmos.c
-PLAT_INCLUDES += -I${LOCAL_DIR}/${MTK_SOC}
+PLAT_INCLUDES += -I${LOCAL_DIR}
$(eval $(call MAKE_MODULE,$(MODULE),$(LOCAL_SRCS-y),$(MTK_BL)))
diff --git a/plat/mediatek/lib/pm/armv9_0/pwr_ctrl.c b/plat/mediatek/lib/pm/armv9_0/pwr_ctrl.c
index 19dcd33..3224244 100644
--- a/plat/mediatek/lib/pm/armv9_0/pwr_ctrl.c
+++ b/plat/mediatek/lib/pm/armv9_0/pwr_ctrl.c
@@ -388,10 +388,6 @@
ret = imtk_cpu_pwr.ops->pwr_domain_pwr_down_wfi(cpu);
if (ret == MTK_CPUPM_E_OK)
plat_panic_handler();
- else
- psci_power_down_wfi();
- /* should never reach here */
- panic();
}
static void pm_smp_init(unsigned int cpu_id, uintptr_t entry_point)
diff --git a/plat/qti/common/src/qti_pm.c b/plat/qti/common/src/qti_pm.c
index 2428126..3f919f2 100644
--- a/plat/qti/common/src/qti_pm.c
+++ b/plat/qti/common/src/qti_pm.c
@@ -211,15 +211,6 @@
}
}
-__dead2 void qti_domain_power_down_wfi(const psci_power_state_t *target_state)
-{
-
- /* For now just do WFI - add any target specific handling if needed */
- psci_power_down_wfi();
- /* We should never reach here */
- panic();
-}
-
static __dead2 void assert_ps_hold(void)
{
mmio_write_32(QTI_PS_HOLD_REG, 0);
@@ -278,7 +269,6 @@
.pwr_domain_off = qti_node_power_off,
.pwr_domain_suspend = qti_node_suspend,
.pwr_domain_suspend_finish = qti_node_suspend_finish,
- .pwr_domain_pwr_down = qti_domain_power_down_wfi,
.system_off = qti_system_off,
.system_reset = qti_system_reset,
.get_node_hw_state = NULL,
diff --git a/plat/rockchip/common/include/plat_private.h b/plat/rockchip/common/include/plat_private.h
index 6388c47..7dcbec9 100644
--- a/plat/rockchip/common/include/plat_private.h
+++ b/plat/rockchip/common/include/plat_private.h
@@ -129,9 +129,9 @@
int rockchip_soc_cores_pwr_dm_resume(void);
void __dead2 rockchip_soc_soft_reset(void);
void __dead2 rockchip_soc_system_off(void);
-void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi(
+void rockchip_soc_cores_pd_pwr_dn_wfi(
const psci_power_state_t *target_state);
-void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void);
+void rockchip_soc_sys_pd_pwr_dn_wfi(void);
extern const unsigned char rockchip_power_domain_tree_desc[];
diff --git a/plat/rockchip/common/plat_pm.c b/plat/rockchip/common/plat_pm.c
index df74033..122bc85 100644
--- a/plat/rockchip/common/plat_pm.c
+++ b/plat/rockchip/common/plat_pm.c
@@ -114,19 +114,13 @@
;
}
-void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi(
+void rockchip_soc_cores_pd_pwr_dn_wfi(
const psci_power_state_t *target_state)
{
- psci_power_down_wfi();
- /* should never reach here */
- panic();
}
-void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
+void rockchip_soc_sys_pd_pwr_dn_wfi(void)
{
- psci_power_down_wfi();
- /* should never reach here */
- panic();
}
/*******************************************************************************
@@ -374,7 +368,7 @@
rockchip_soc_system_off();
}
-static void __dead2 rockchip_pd_pwr_down_wfi(
+static void rockchip_pd_pwr_down_wfi(
const psci_power_state_t *target_state)
{
if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
diff --git a/plat/rockchip/px30/drivers/pmu/pmu.c b/plat/rockchip/px30/drivers/pmu/pmu.c
index 6200cac..2d576bb 100644
--- a/plat/rockchip/px30/drivers/pmu/pmu.c
+++ b/plat/rockchip/px30/drivers/pmu/pmu.c
@@ -999,9 +999,9 @@
* Maybe the HW needs some times to reset the system,
* so we do not hope the core to execute valid codes.
*/
- psci_power_down_wfi();
- /* should never reach here */
- panic();
+ while (1) {
+ wfi();
+ }
}
void __dead2 rockchip_soc_system_off(void)
@@ -1026,9 +1026,9 @@
* Maybe the HW needs some times to reset the system,
* so we do not hope the core to execute valid codes.
*/
- psci_power_down_wfi();
- /* should never reach here */
- panic();
+ while (1) {
+ wfi();
+ }
}
void rockchip_plat_mmu_el3(void)
diff --git a/plat/rockchip/rk3328/drivers/pmu/pmu.c b/plat/rockchip/rk3328/drivers/pmu/pmu.c
index 41660e2..6fd1bfb 100644
--- a/plat/rockchip/rk3328/drivers/pmu/pmu.c
+++ b/plat/rockchip/rk3328/drivers/pmu/pmu.c
@@ -613,14 +613,9 @@
sram_soc_enter_lp();
}
-void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
+void rockchip_soc_sys_pd_pwr_dn_wfi(void)
{
sram_suspend();
-
- /* should never reach here */
- psci_power_down_wfi();
- /* should never reach here */
- panic();
}
int rockchip_soc_sys_pwr_dm_suspend(void)
diff --git a/plat/rockchip/rk3576/drivers/pmu/pmu.c b/plat/rockchip/rk3576/drivers/pmu/pmu.c
index c7db176..ad0132a 100644
--- a/plat/rockchip/rk3576/drivers/pmu/pmu.c
+++ b/plat/rockchip/rk3576/drivers/pmu/pmu.c
@@ -939,21 +939,6 @@
return 0;
}
-void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi(const
- psci_power_state_t *target_state)
-{
- psci_power_down_wfi();
- /* should never reach here */
- panic();
-}
-
-void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
-{
- psci_power_down_wfi();
- /* should never reach here */
- panic();
-}
-
static int rockchip_reboot_is_rbrom(void)
{
return mmio_read_32(PMU0_GRF_BASE + PMU0GRF_OS_REG(16)) ==
@@ -998,9 +983,9 @@
* Maybe the HW needs some times to reset the system,
* so we do not hope the core to execute valid codes.
*/
- psci_power_down_wfi();
- /* should never reach here */
- panic();
+ while (1) {
+ wfi();
+ }
}
void __dead2 rockchip_soc_system_off(void)
@@ -1020,9 +1005,9 @@
* Maybe the HW needs some times to reset the system,
* so we do not hope the core to execute valid codes.
*/
- psci_power_down_wfi();
- /* should never reach here */
- panic();
+ while (1) {
+ wfi();
+ }
}
static void rockchip_pmu_pd_repair_init(void)
diff --git a/plat/rockchip/rk3588/drivers/pmu/pmu.c b/plat/rockchip/rk3588/drivers/pmu/pmu.c
index 16436dd..1a6394d 100644
--- a/plat/rockchip/rk3588/drivers/pmu/pmu.c
+++ b/plat/rockchip/rk3588/drivers/pmu/pmu.c
@@ -1315,20 +1315,9 @@
return 0;
}
-void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi(const
- psci_power_state_t *target_state)
-{
- psci_power_down_wfi();
- /* should never reach here */
- panic();
-}
-
-void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
+void rockchip_soc_sys_pd_pwr_dn_wfi(void)
{
cpus_pd_req_enter_wfi();
- psci_power_down_wfi();
- /* should never reach here */
- panic();
}
void __dead2 rockchip_soc_soft_reset(void)
@@ -1355,9 +1344,9 @@
* Maybe the HW needs some times to reset the system,
* so we do not hope the core to execute valid codes.
*/
- psci_power_down_wfi();
- /* should never reach here */
- panic();
+ while (1) {
+ wfi();
+ }
}
void __dead2 rockchip_soc_system_off(void)
@@ -1378,9 +1367,9 @@
* Maybe the HW needs some times to reset the system,
* so we do not hope the core to execute valid codes.
*/
- psci_power_down_wfi();
- /* should never reach here */
- panic();
+ while (1) {
+ wfi();
+ }
}
static void rockchip_pmu_pd_init(void)
diff --git a/services/spd/opteed/opteed_pm.c b/services/spd/opteed/opteed_pm.c
index c949823..c4a79f5 100644
--- a/services/spd/opteed/opteed_pm.c
+++ b/services/spd/opteed/opteed_pm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -137,12 +137,15 @@
* completed the preceding suspend call. Use that context to program an entry
* into OPTEE to allow it to do any remaining book keeping
******************************************************************************/
-static void opteed_cpu_suspend_finish_handler(u_register_t max_off_pwrlvl)
+static void opteed_cpu_suspend_finish_handler(u_register_t max_off_pwrlvl, bool abandon)
{
int32_t rc = 0;
uint32_t linear_id = plat_my_core_pos();
optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
+ /* opteed is not expected to be used on platforms with pabandon */
+ assert(!abandon);
+
if (get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_UNKNOWN) {
return;
}
diff --git a/services/spd/tlkd/tlkd_pm.c b/services/spd/tlkd/tlkd_pm.c
index ed5bf77..ed66245 100644
--- a/services/spd/tlkd/tlkd_pm.c
+++ b/services/spd/tlkd/tlkd_pm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -69,12 +69,15 @@
* This cpu is being resumed. Inform TLK of the SYSTEM_SUSPEND exit, so
* that it can pass this information to its Trusted Apps.
******************************************************************************/
-static void cpu_resume_handler(u_register_t suspend_level)
+static void cpu_resume_handler(u_register_t suspend_level, bool abandon)
{
gp_regs_t *gp_regs;
int cpu = read_mpidr() & MPIDR_CPU_MASK;
int32_t rc = 0;
+ /* tlkd is not expected to be used on platforms with pabandon */
+ assert(!abandon);
+
/*
* TLK runs only on CPU0 and resumes its Trusted Apps during
* SYSTEM_SUSPEND exit. It has no role to play during CPU_SUSPEND
diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c
index aae2d9a..4b44798 100644
--- a/services/spd/trusty/trusty.c
+++ b/services/spd/trusty/trusty.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -397,12 +397,21 @@
static void trusty_cpu_suspend_handler(u_register_t max_off_lvl)
{
+ /* Save NS context in case we need to return to it */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
trusty_cpu_suspend(max_off_lvl);
}
-static void trusty_cpu_suspend_finish_handler(u_register_t max_off_lvl)
+static void trusty_cpu_suspend_finish_handler(u_register_t max_off_lvl, bool abandon)
{
trusty_cpu_resume(max_off_lvl);
+
+ /* We're returning back to NS so we need to put back its context */
+ if (abandon) {
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ }
+
}
static const spd_pm_ops_t trusty_pm = {
diff --git a/services/spd/tspd/tspd_pm.c b/services/spd/tspd/tspd_pm.c
index b95ee8f..d44a807 100644
--- a/services/spd/tspd/tspd_pm.c
+++ b/services/spd/tspd/tspd_pm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -83,6 +83,10 @@
/* Program the entry point and enter the TSP */
cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_suspend_entry);
+
+ /* Save NS context in case we need to return to it */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
rc = tspd_synchronous_sp_entry(tsp_ctx);
/*
@@ -147,7 +151,7 @@
* completed the preceding suspend call. Use that context to program an entry
* into the TSP to allow it to do any remaining book keeping
******************************************************************************/
-static void tspd_cpu_suspend_finish_handler(u_register_t max_off_pwrlvl)
+static void tspd_cpu_suspend_finish_handler(u_register_t max_off_pwrlvl, bool abandon)
{
int32_t rc = 0;
uint32_t linear_id = plat_my_core_pos();
@@ -170,6 +174,11 @@
if (rc != 0)
panic();
+ /* We're returning back to NS so we need to put back its context */
+ if (abandon) {
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ }
+
/* Update its context to reflect the state the SP is in */
set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
}
diff --git a/services/std_svc/rmmd/rmmd_initial_context.h b/services/std_svc/rmmd/rmmd_initial_context.h
deleted file mode 100644
index d7a743d..0000000
--- a/services/std_svc/rmmd/rmmd_initial_context.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef RMMD_INITIAL_CONTEXT_H
-#define RMMD_INITIAL_CONTEXT_H
-
-#include <arch.h>
-
-/*
- * SPSR_EL2
- * M=0x9 (0b1001 EL2h)
- * M[4]=0
- * DAIF=0xF Exceptions masked on entry.
- * BTYPE=0 BTI not yet supported.
- * SSBS=0 Not yet supported.
- * IL=0 Not an illegal exception return.
- * SS=0 Not single stepping.
- * PAN=1 RMM shouldn't access realm memory.
- * UAO=0
- * DIT=0
- * TCO=0
- * NZCV=0
- */
-#define REALM_SPSR_EL2 ( \
- SPSR_M_EL2H | \
- (0xF << SPSR_DAIF_SHIFT) | \
- SPSR_PAN_BIT \
- )
-
-#endif /* RMMD_INITIAL_CONTEXT_H */
diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c
index 7435130..fd40f0f 100644
--- a/services/std_svc/rmmd/rmmd_main.c
+++ b/services/std_svc/rmmd/rmmd_main.c
@@ -36,7 +36,6 @@
#include <lib/extensions/sve.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/trbe.h>
-#include "rmmd_initial_context.h"
#include "rmmd_private.h"
/*******************************************************************************
@@ -110,40 +109,6 @@
panic();
}
-static void rmm_el2_context_init(el2_sysregs_t *regs)
-{
- write_el2_ctx_common(regs, spsr_el2, REALM_SPSR_EL2);
- write_el2_ctx_common(regs, sctlr_el2, SCTLR_EL2_RES1);
-}
-
-/*******************************************************************************
- * Enable architecture extensions on first entry to Realm world.
- ******************************************************************************/
-
-static void manage_extensions_realm(cpu_context_t *ctx)
-{
- /*
- * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for Non Secure world.
- */
- if (is_feat_sme_supported()) {
- sme_enable(ctx);
- }
-
- /*
- * SPE and TRBE cannot be fully disabled from EL3 registers alone, only
- * sysreg access can. In case the EL1 controls leave them active on
- * context switch, we want the owning security state to be NS so Realm
- * can't be DOSed.
- */
- if (is_feat_spe_supported()) {
- spe_disable(ctx);
- }
-
- if (is_feat_trbe_supported()) {
- trbe_disable(ctx);
- }
-}
-
/*******************************************************************************
* Jump to the RMM for the first time.
******************************************************************************/
@@ -154,12 +119,6 @@
INFO("RMM init start.\n");
- /* Enable architecture extensions */
- manage_extensions_realm(&ctx->cpu_ctx);
-
- /* Initialize RMM EL2 context. */
- rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);
-
rc = rmmd_rmm_sync_entry(ctx);
if (rc != E_RMM_BOOT_SUCCESS) {
ERROR("RMM init failed: %ld\n", rc);
@@ -384,12 +343,6 @@
/* Initialise RMM context with this entry point information */
cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);
- /* Enable architecture extensions */
- manage_extensions_realm(&ctx->cpu_ctx);
-
- /* Initialize RMM EL2 context. */
- rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);
-
rc = rmmd_rmm_sync_entry(ctx);
if (rc != E_RMM_BOOT_SUCCESS) {
diff --git a/services/std_svc/spm/el3_spmc/spmc_main.c b/services/std_svc/spm/el3_spmc/spmc_main.c
index 38e29f3..5c4bd49 100644
--- a/services/std_svc/spm/el3_spmc/spmc_main.c
+++ b/services/std_svc/spm/el3_spmc/spmc_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2024, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022-2025, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -1891,7 +1891,11 @@
struct secure_partition_desc *sp;
unsigned int idx;
uintptr_t base_va = (uintptr_t)x1;
- uint32_t tf_attr = 0;
+ uint64_t max_page_count = x2 + 1;
+ uint64_t page_count = 0;
+ uint32_t base_page_attr = 0;
+ uint32_t page_attr = 0;
+ unsigned int table_level;
int ret;
/* This request cannot originate from the Normal world. */
@@ -1923,17 +1927,49 @@
return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
}
+ base_va &= ~(PAGE_SIZE_MASK);
+
/* Request the permissions */
- ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va, &tf_attr);
+ ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
+ &base_page_attr, &table_level);
if (ret != 0) {
return spmc_ffa_error_return(handle,
FFA_ERROR_INVALID_PARAMETER);
}
- /* Convert TF-A permission to FF-A permissions attributes. */
- x2 = mmap_perm_to_ffa_perm(tf_attr);
+ /*
+ * Calculate how many pages are in this block entry from base_va,
+ * including its own page.
+ */
+ page_count = ((XLAT_BLOCK_SIZE(table_level) -
+ (base_va & XLAT_BLOCK_MASK(table_level))) >> PAGE_SIZE_SHIFT);
+ base_va += XLAT_BLOCK_SIZE(table_level);
- SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, x2);
+ while ((page_count < max_page_count) && (base_va != 0x00)) {
+ ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
+ &page_attr, &table_level);
+ if (ret != 0) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ if (page_attr != base_page_attr) {
+ break;
+ }
+
+ base_va += XLAT_BLOCK_SIZE(table_level);
+ page_count += (XLAT_BLOCK_SIZE(table_level) >> PAGE_SIZE_SHIFT);
+ }
+
+ if (page_count > max_page_count) {
+ page_count = max_page_count;
+ }
+
+ /* Convert TF-A permission to FF-A permissions attributes. */
+ x2 = mmap_perm_to_ffa_perm(base_page_attr);
+
+ /* x3 should be page count - 1 */
+ SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, x2, --page_count);
}
/*******************************************************************************
diff --git a/services/std_svc/spm/el3_spmc/spmc_pm.c b/services/std_svc/spm/el3_spmc/spmc_pm.c
index 0a6215c..b267212 100644
--- a/services/std_svc/spm/el3_spmc/spmc_pm.c
+++ b/services/std_svc/spm/el3_spmc/spmc_pm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -201,7 +201,7 @@
/*******************************************************************************
* spmc_cpu_suspend_finish_handler
******************************************************************************/
-static void spmc_cpu_suspend_finish_handler(u_register_t unused)
+static void spmc_cpu_suspend_finish_handler(u_register_t unused, bool abandon)
{
struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
unsigned int linear_id = plat_my_core_pos();
@@ -210,6 +210,9 @@
/* Sanity check for a NULL pointer dereference. */
assert(sp != NULL);
+ /* EL3 SPMC is not expected to be used on platforms with pabandon */
+ assert(!abandon);
+
/*
* Check if the SP has subscribed for this power management message.
* If not then we don't have anything else to do here.
diff --git a/services/std_svc/spm/spm_mm/spm_mm_main.c b/services/std_svc/spm/spm_mm/spm_mm_main.c
index 60b34d2..c204987 100644
--- a/services/std_svc/spm/spm_mm/spm_mm_main.c
+++ b/services/std_svc/spm/spm_mm/spm_mm_main.c
@@ -312,6 +312,9 @@
uint64_t flags)
{
unsigned int ns;
+ int32_t ret;
+ uint32_t attr;
+ uint32_t page_count;
/* Determine which security state this SMC originated from */
ns = is_caller_non_secure(flags);
@@ -340,9 +343,17 @@
WARN("MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
SMC_RET1(handle, SPM_MM_NOT_SUPPORTED);
}
- SMC_RET1(handle,
- spm_memory_attributes_get_smc_handler(
- &sp_ctx, x1));
+
+ /* x2 = page_count - 1 */
+ page_count = x2 + 1;
+
+ ret = spm_memory_attributes_get_smc_handler(
+ &sp_ctx, x1, &page_count, &attr);
+ if (ret != SPM_MM_SUCCESS) {
+ SMC_RET1(handle, ret);
+ } else {
+ SMC_RET2(handle, attr, --page_count);
+ }
case MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64:
INFO("Received MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");
diff --git a/services/std_svc/spm/spm_mm/spm_mm_private.h b/services/std_svc/spm/spm_mm/spm_mm_private.h
index 3a52a3e..473d84d 100644
--- a/services/std_svc/spm/spm_mm/spm_mm_private.h
+++ b/services/std_svc/spm/spm_mm/spm_mm_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -56,7 +56,9 @@
void spm_sp_setup(sp_context_t *sp_ctx);
int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
- uintptr_t base_va);
+ uintptr_t base_va,
+ uint32_t *page_count,
+ uint32_t *attr);
int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
u_register_t page_address,
u_register_t pages_count,
diff --git a/services/std_svc/spm/spm_mm/spm_mm_xlat.c b/services/std_svc/spm/spm_mm/spm_mm_xlat.c
index 01d95c7..32eda3a 100644
--- a/services/std_svc/spm/spm_mm/spm_mm_xlat.c
+++ b/services/std_svc/spm/spm_mm/spm_mm_xlat.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -88,22 +88,61 @@
}
int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
- uintptr_t base_va)
+ uintptr_t base_va,
+ uint32_t *page_count,
+ uint32_t *attr)
{
- uint32_t attributes;
+ uint32_t cur_attr;
+ uint32_t table_level;
+ uint32_t count;
+ int rc;
+
+ assert((page_count != NULL) && (*page_count > 0));
+ assert(attr != NULL);
+
+ base_va &= ~(PAGE_SIZE_MASK);
spin_lock(&mem_attr_smc_lock);
- int rc = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
- base_va, &attributes);
+ rc = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+ base_va, attr, &table_level);
+ if (rc != 0) {
+ goto err_out;
+ }
+ /*
+ * Calculate how many pages are in this block entry from base_va,
+ * including its own page.
+ */
+ count = ((XLAT_BLOCK_SIZE(table_level) -
+ (base_va & XLAT_BLOCK_MASK(table_level))) >> PAGE_SIZE_SHIFT);
+ base_va += XLAT_BLOCK_SIZE(table_level);
+
+ while ((count < *page_count) && (base_va != 0x00)) {
+ rc = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+ base_va, &cur_attr, &table_level);
+ if (rc != 0) {
+ goto err_out;
+ }
+
+ if (*attr != cur_attr) {
+ *page_count = count;
+ break;
+ }
+
+ base_va += XLAT_BLOCK_SIZE(table_level);
+ count += (XLAT_BLOCK_SIZE(table_level) >> PAGE_SIZE_SHIFT);
+ }
+
+ *attr = smc_mmap_to_smc_attr(*attr);
+
+err_out:
spin_unlock(&mem_attr_smc_lock);
-
/* Convert error codes of xlat_get_mem_attributes_ctx() into SPM. */
assert((rc == 0) || (rc == -EINVAL));
if (rc == 0) {
- return (int32_t) smc_mmap_to_smc_attr(attributes);
+ return SPM_MM_SUCCESS;
} else {
return SPM_MM_INVALID_PARAMETER;
}