aboutsummaryrefslogtreecommitdiff
path: root/tftf/tests
diff options
context:
space:
mode:
Diffstat (limited to 'tftf/tests')
-rw-r--r--tftf/tests/aarch32_tests_to_skip.txt20
-rw-r--r--tftf/tests/common/test_helpers.c62
-rw-r--r--tftf/tests/extensions/afp/test_afp.c35
-rw-r--r--tftf/tests/extensions/brbe/test_brbe.c36
-rw-r--r--tftf/tests/extensions/fgt/test_fgt.c108
-rw-r--r--tftf/tests/extensions/hcx/test_hcx.c46
-rw-r--r--tftf/tests/extensions/mpam/test_mpam.c28
-rw-r--r--tftf/tests/extensions/pauth/test_pauth.c165
-rw-r--r--tftf/tests/extensions/pmuv3/test_pmuv3.c249
-rw-r--r--tftf/tests/extensions/rng_trap/test_rng_trap.c86
-rw-r--r--tftf/tests/extensions/sme/test_sme.c178
-rw-r--r--tftf/tests/extensions/sme/test_sme2.c113
-rw-r--r--tftf/tests/extensions/spe/test_spe.c47
-rw-r--r--tftf/tests/extensions/sve/sve_operations.S39
-rw-r--r--tftf/tests/extensions/sve/test_sve.c13
-rw-r--r--tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.c59
-rw-r--r--tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.h15
-rw-r--r--tftf/tests/extensions/trbe/test_trbe.c33
-rw-r--r--tftf/tests/extensions/trf/test_trf.c31
-rw-r--r--tftf/tests/extensions/wfxt/test_wfxt.c89
-rw-r--r--tftf/tests/misc_tests/inject_ras_error.S (renamed from tftf/tests/misc_tests/inject_serror.S)52
-rw-r--r--tftf/tests/misc_tests/test_ea_ffh.c84
-rw-r--r--tftf/tests/misc_tests/test_firmware_handoff.c56
-rw-r--r--tftf/tests/misc_tests/test_invalid_access.c364
-rw-r--r--tftf/tests/misc_tests/test_nop.c84
-rw-r--r--tftf/tests/misc_tests/test_ras_ffh_nested.c139
-rw-r--r--tftf/tests/misc_tests/test_ras_kfh.c52
-rw-r--r--tftf/tests/misc_tests/test_ras_kfh_reflect.c181
-rw-r--r--tftf/tests/misc_tests/test_single_fault.c21
-rw-r--r--tftf/tests/misc_tests/test_uncontainable.c6
-rw-r--r--tftf/tests/misc_tests/test_undef_injection.c70
-rw-r--r--tftf/tests/performance_tests/test_psci_latencies.c21
-rw-r--r--tftf/tests/plat/xilinx/common/plat_pm.c87
-rw-r--r--tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_3.c164
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/host_pmuv3.c240
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/host_realm_helper.c449
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c1267
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/host_shared_data.c79
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/rmi_delegate_tests.c348
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/rmi_spm_tests.c485
-rw-r--r--tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c546
-rw-r--r--tftf/tests/runtime_services/realm_payload/host_realm_payload_simd_tests.c1365
-rw-r--r--tftf/tests/runtime_services/realm_payload/host_realm_payload_tests.c1275
-rw-r--r--tftf/tests/runtime_services/realm_payload/host_realm_spm.c413
-rw-r--r--tftf/tests/runtime_services/secure_service/aarch32/ffa_arch_helpers.S42
-rw-r--r--tftf/tests/runtime_services/secure_service/aarch64/ffa_arch_helpers.S62
-rw-r--r--tftf/tests/runtime_services/secure_service/ffa_helpers.c622
-rw-r--r--tftf/tests/runtime_services/secure_service/spm_common.c748
-rw-r--r--tftf/tests/runtime_services/secure_service/spm_test_helpers.c128
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c71
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c117
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_features.c47
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_group0_interrupts.c73
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_interrupts.c585
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c1074
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_notifications.c1564
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_rxtx_map.c67
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_secure_interrupts.c518
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c477
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_smccc.c165
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_smccc_asm.S240
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_version.c88
-rw-r--r--tftf/tests/runtime_services/secure_service/test_quark_request.c65
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spci_blocking_request.c189
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spci_blocking_while_busy.c170
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spci_handle_open.c108
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spci_non_blocking_interrupt.c146
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spci_non_blocking_request.c222
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spm_cpu_features.c69
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spm_simd.c211
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spm_smmu.c155
-rw-r--r--tftf/tests/runtime_services/standard_service/errata_abi/api_tests/test_errata_abi_functionality.c739
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c484
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c8
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c21
-rw-r--r--tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S30
-rw-r--r--tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_pstate.c288
-rw-r--r--tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c15
-rw-r--r--tftf/tests/runtime_services/trusted_os/tsp/test_pstate_after_exception.c75
-rw-r--r--tftf/tests/tbb-tests/tbb_test_infra.c45
-rw-r--r--tftf/tests/tbb-tests/tbb_test_infra.h15
-rw-r--r--tftf/tests/tbb-tests/test_tbb_corrupt_fip.c44
-rw-r--r--tftf/tests/tests-corrupt-fip.mk15
-rw-r--r--tftf/tests/tests-corrupt-fip.xml15
-rw-r--r--tftf/tests/tests-cpu-extensions.mk23
-rw-r--r--tftf/tests/tests-cpu-extensions.xml17
-rw-r--r--tftf/tests/tests-ea-ffh.mk7
-rw-r--r--tftf/tests/tests-ea-ffh.xml15
-rw-r--r--tftf/tests/tests-errata_abi.mk7
-rw-r--r--tftf/tests/tests-errata_abi.xml15
-rw-r--r--tftf/tests/tests-extensive.mk4
-rw-r--r--tftf/tests/tests-extensive.xml28
-rw-r--r--tftf/tests/tests-firmware-handoff.mk13
-rw-r--r--tftf/tests/tests-firmware-handoff.xml14
-rw-r--r--tftf/tests/tests-hcx.mk9
-rw-r--r--tftf/tests/tests-hcx.xml15
-rw-r--r--tftf/tests/tests-memory-access.mk30
-rw-r--r--tftf/tests/tests-memory-access.xml62
-rw-r--r--tftf/tests/tests-nop.mk9
-rw-r--r--tftf/tests/tests-nop.xml16
-rw-r--r--tftf/tests/tests-psci.xml12
-rw-r--r--tftf/tests/tests-quark.mk9
-rw-r--r--tftf/tests/tests-quark.xml19
-rw-r--r--tftf/tests/tests-ras-ffh-nested.mk10
-rw-r--r--tftf/tests/tests-ras-ffh-nested.xml13
-rw-r--r--tftf/tests/tests-ras-kfh-reflect.mk10
-rw-r--r--tftf/tests/tests-ras-kfh-reflect.xml14
-rw-r--r--tftf/tests/tests-ras-kfh.mk10
-rw-r--r--tftf/tests/tests-ras-kfh.xml13
-rw-r--r--tftf/tests/tests-realm-payload.mk45
-rw-r--r--tftf/tests/tests-realm-payload.xml103
-rw-r--r--tftf/tests/tests-rmi-spm.mk31
-rw-r--r--tftf/tests/tests-rmi-spm.xml18
-rw-r--r--tftf/tests/tests-rng_trap.mk9
-rw-r--r--tftf/tests/tests-rng_trap.xml16
-rw-r--r--tftf/tests/tests-sdei.mk3
-rw-r--r--tftf/tests/tests-sdei.xml3
-rw-r--r--tftf/tests/tests-single-fault.mk4
-rw-r--r--tftf/tests/tests-smcfuzzing.mk40
-rw-r--r--tftf/tests/tests-spm.mk37
-rw-r--r--tftf/tests/tests-spm.xml179
-rw-r--r--tftf/tests/tests-standard.mk14
-rw-r--r--tftf/tests/tests-standard.xml8
-rw-r--r--tftf/tests/tests-tftf-validation.xml1
-rw-r--r--tftf/tests/tests-timer-stress.mk10
-rw-r--r--tftf/tests/tests-timer-stress.xml15
-rw-r--r--tftf/tests/tests-trng.mk7
-rw-r--r--tftf/tests/tests-tsp.mk3
-rw-r--r--tftf/tests/tests-tsp.xml7
-rw-r--r--tftf/tests/tests-uncontainable.mk4
-rw-r--r--tftf/tests/tests-undef-injection.mk7
-rw-r--r--tftf/tests/tests-undef-injection.xml14
-rw-r--r--tftf/tests/tests-versal.mk12
-rw-r--r--tftf/tests/tests-versal.xml20
-rw-r--r--tftf/tests/xlat_lib_v2/xlat_lib_v2_tests.c3
135 files changed, 18002 insertions, 2122 deletions
diff --git a/tftf/tests/aarch32_tests_to_skip.txt b/tftf/tests/aarch32_tests_to_skip.txt
new file mode 100644
index 000000000..210d46536
--- /dev/null
+++ b/tftf/tests/aarch32_tests_to_skip.txt
@@ -0,0 +1,20 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+Realm payload at EL1
+SIMD,SVE Registers context
+Invalid memory access with RME extension
+FF-A Setup and Discovery
+SP exceptions
+FF-A Direct messaging
+FF-A Group0 interrupts
+FF-A Power management
+FF-A Memory Sharing
+SIMD,SVE Registers context
+FF-A Interrupt
+SMMUv3 tests
+FF-A Notifications
+RMI and SPM tests
+FF-A SMCCC compliance
diff --git a/tftf/tests/common/test_helpers.c b/tftf/tests/common/test_helpers.c
index d794bebc7..6a0b08bd0 100644
--- a/tftf/tests/common/test_helpers.c
+++ b/tftf/tests/common/test_helpers.c
@@ -1,18 +1,17 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <stdlib.h>
+
#include <arch_helpers.h>
#include <plat_topology.h>
#include <platform.h>
-#include <power_management.h>
#include <test_helpers.h>
#include <tftf_lib.h>
-static struct mailbox_buffers test_mb = {.send = NULL, .recv = NULL};
-
int is_sys_suspend_state_ready(void)
{
int aff_info;
@@ -131,48 +130,33 @@ test_result_t map_test_unmap(const map_args_unmap_t *args,
return test_ret;
}
-void set_tftf_mailbox(const struct mailbox_buffers *mb)
+/*
+ * Utility function to wait for all CPUs other than the caller to be
+ * OFF.
+ */
+void wait_for_non_lead_cpus(void)
{
- if (mb != NULL) {
- test_mb = *mb;
- }
-}
+ unsigned int target_mpid, target_node;
-bool get_tftf_mailbox(struct mailbox_buffers *mb)
-{
- if ((test_mb.recv != NULL) && (test_mb.send != NULL)) {
- *mb = test_mb;
- return true;
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ wait_for_core_to_turn_off(target_mpid);
}
- return false;
}
-test_result_t check_spmc_testing_set_up(
- uint32_t ffa_version_major, uint32_t ffa_version_minor,
- const struct ffa_uuid *ffa_uuids, size_t ffa_uuids_size)
+void wait_for_core_to_turn_off(unsigned int mpidr)
{
- struct mailbox_buffers mb;
+ /* Skip lead CPU, as it is powered on */
+ if (mpidr == (read_mpidr_el1() & MPID_MASK))
+ return;
- if (ffa_uuids == NULL) {
- ERROR("Invalid parameter ffa_uuids!\n");
- return TEST_RESULT_FAIL;
- }
-
- SKIP_TEST_IF_FFA_VERSION_LESS_THAN(ffa_version_major,
- ffa_version_minor);
-
- /**********************************************************************
- * If OP-TEE is SPMC skip the current test.
- **********************************************************************/
- if (check_spmc_execution_level()) {
- VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
- return TEST_RESULT_SKIPPED;
+ while (tftf_psci_affinity_info(mpidr, MPIDR_AFFLVL0) != PSCI_STATE_OFF) {
+ continue;
}
+}
- GET_TFTF_MAILBOX(mb);
-
- for (unsigned int i = 0U; i < ffa_uuids_size; i++)
- SKIP_TEST_IF_FFA_ENDPOINT_NOT_DEPLOYED(*mb, ffa_uuids[i].uuid);
-
- return TEST_RESULT_SUCCESS;
+/* Generate 64-bit random number */
+unsigned long long rand64(void)
+{
+ return ((unsigned long long)rand() << 32) | rand();
}
diff --git a/tftf/tests/extensions/afp/test_afp.c b/tftf/tests/extensions/afp/test_afp.c
new file mode 100644
index 000000000..625d9cf48
--- /dev/null
+++ b/tftf/tests/extensions/afp/test_afp.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <test_helpers.h>
+
+test_result_t test_afp_support(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ test_result_t ret;
+ uint64_t saved_fpcr, fpcr;
+
+ SKIP_TEST_IF_AFP_NOT_SUPPORTED();
+
+ saved_fpcr = read_fpcr();
+ /* Write advanced floating point controlling bits */
+ write_fpcr(saved_fpcr | FPCR_FIZ_BIT | FPCR_AH_BIT | FPCR_NEP_BIT);
+
+ fpcr = read_fpcr();
+ /* Check if all bits got written successfully */
+ if ((fpcr | ~(FPCR_FIZ_BIT | FPCR_AH_BIT | FPCR_NEP_BIT)) == ~0ULL) {
+ ret = TEST_RESULT_SUCCESS;
+ } else {
+ ret = TEST_RESULT_FAIL;
+ }
+
+ write_fpcr(saved_fpcr);
+
+ return ret;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/brbe/test_brbe.c b/tftf/tests/extensions/brbe/test_brbe.c
new file mode 100644
index 000000000..f2c244a32
--- /dev/null
+++ b/tftf/tests/extensions/brbe/test_brbe.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+/*
+ * EL3 is expected to allow access to branch record buffer control registers
+ * from NS world. Accessing these registers will trap to EL3 and crash when EL3
+ * has not properly enabled it.
+ */
+test_result_t test_brbe_enabled(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_BRBE_NOT_SUPPORTED();
+
+ read_brbcr_el1();
+ read_brbcr_el2();
+ read_brbfcr_el1();
+ read_brbts_el1();
+ read_brbinfinj_el1();
+ read_brbsrcinj_el1();
+ read_brbtgtinj_el1();
+ read_brbidr0_el1();
+
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/fgt/test_fgt.c b/tftf/tests/extensions/fgt/test_fgt.c
index 6213d4bf5..5d9600d72 100644
--- a/tftf/tests/extensions/fgt/test_fgt.c
+++ b/tftf/tests/extensions/fgt/test_fgt.c
@@ -10,6 +10,38 @@
#include <string.h>
#include <arch_helpers.h>
+#ifdef __aarch64__
+
+static bool is_init_val_set(u_register_t reg, u_register_t init_val,
+ u_register_t feat_mask)
+{
+ return (reg & feat_mask) == (init_val & feat_mask);
+}
+
+#define CHECK_FEAT_TRAP_INITIALIZED(_reg, _REG, _feat_check, _FEAT) \
+do { \
+ if (_feat_check() != 0) { \
+ if (is_init_val_set(_reg, _REG ## _INIT_VAL, \
+ _REG ## _ ## FEAT_ ## _FEAT ## _MASK) == 0) { \
+ return TEST_RESULT_FAIL; \
+ } \
+ } \
+} while (false);
+
+#define CHECK_FEAT_TRAP_INITIALIZED2(_reg, _REG, _feat_check, _FEAT, \
+ _feat2_check, _FEAT2, _op) \
+do { \
+ if ((_feat_check() != 0) _op (_feat2_check() != 0)) { \
+ if (is_init_val_set(_reg, _REG ## _INIT_VAL, _REG ## _ \
+ ## FEAT_ ## _FEAT ## _ ## _FEAT2 ## _MASK) \
+ == 0) { \
+ return TEST_RESULT_FAIL; \
+ } \
+ } \
+} while (false);
+
+#endif
+
/*
* TF-A is expected to allow access to ARMv8.6-FGT system registers from EL2.
* Reading these registers causes a trap to EL3 and crash when TF-A has not
@@ -21,12 +53,82 @@ test_result_t test_fgt_enabled(void)
#ifdef __aarch64__
SKIP_TEST_IF_FGT_NOT_SUPPORTED();
- read_hfgrtr_el2();
- read_hfgwtr_el2();
- read_hfgitr_el2();
+
+ u_register_t hfgitr_el2 = read_hfgitr_el2();
+ u_register_t hfgrtr_el2 = read_hfgrtr_el2();
+ u_register_t hfgwtr_el2 = read_hfgwtr_el2();
+
+ /*
+ * The following registers are not supposed to be consumed, but
+ * are read to test their presence when FEAT_FGT is supported.
+ */
read_hdfgrtr_el2();
read_hdfgwtr_el2();
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ get_feat_brbe_support, BRBE)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ is_feat_specres_present, SPECRES)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ is_feat_tlbirange_present, TLBIRANGE)
+ CHECK_FEAT_TRAP_INITIALIZED2(hfgitr_el2, HFGITR_EL2, \
+ is_feat_tlbirange_present, TLBIRANGE, \
+ is_feat_tlbios_present, TLBIOS, &&)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ is_feat_tlbios_present, TLBIOS)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ is_armv8_2_pan2_present, PAN2)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ is_feat_dpb2_present, DPB2)
+ if (is_init_val_set(hfgitr_el2, HFGITR_EL2_INIT_VAL,
+ HFGITR_EL2_NON_FEAT_DEPENDENT_MASK) == 0) {
+ return TEST_RESULT_FAIL;
+ }
+
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_sme_supported, SME)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_ls64_accdata_present, LS64_ACCDATA)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_ras_present, RAS)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_rasv1p1_present, RASV1P1)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_gicv3_gicv4_present, GICV3)
+ CHECK_FEAT_TRAP_INITIALIZED2(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_csv2_2_present, CSV2_2, \
+ is_feat_csv2_1p2_present, CSV2_1P2, ||)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_lor_present, LOR)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_armv8_3_pauth_apa_api_apa3_present, PAUTH)
+ if (is_init_val_set(hfgrtr_el2, HFGRTR_EL2_INIT_VAL,
+ HFGRTR_EL2_NON_FEAT_DEPENDENT_MASK) == 0) {
+ return TEST_RESULT_FAIL;
+ }
+
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_sme_supported, SME)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_ls64_accdata_present, LS64_ACCDATA);
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_ras_present, RAS);
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_rasv1p1_present, RASV1P1);
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_gicv3_gicv4_present, GICV3);
+ CHECK_FEAT_TRAP_INITIALIZED2(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_csv2_2_present, CSV2_2, \
+ is_feat_csv2_1p2_present, CSV2_1P2, ||)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_lor_present, LOR)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_armv8_3_pauth_apa_api_apa3_present, PAUTH)
+ if (is_init_val_set(hfgwtr_el2, HFGWTR_EL2_INIT_VAL, \
+ HFGWTR_EL2_NON_FEAT_DEPENDENT_MASK) == 0) {
+ return TEST_RESULT_FAIL;
+ }
+
return TEST_RESULT_SUCCESS;
#endif /* __aarch64__ */
}
diff --git a/tftf/tests/extensions/hcx/test_hcx.c b/tftf/tests/extensions/hcx/test_hcx.c
new file mode 100644
index 000000000..3621f2126
--- /dev/null
+++ b/tftf/tests/extensions/hcx/test_hcx.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <tftf_lib.h>
+#include <tftf.h>
+#include <arch_helpers.h>
+#include <arch_features.h>
+
+/* This very simple test just ensures that HCRX_EL2 access does not trap. */
+test_result_t test_feat_hcx_enabled(void)
+{
+#ifdef __aarch64__
+ u_register_t hcrx_el2;
+
+ /* Make sure FEAT_HCX is supported. */
+ if (!get_feat_hcx_support()) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Attempt to read HCRX_EL2, if not enabled this should trap to EL3. */
+ hcrx_el2 = read_hcrx_el2();
+
+ /*
+ * If we make it this far, access to HCRX_EL2 was not trapped, and
+ * therefore FEAT_HCX is supported.
+ */
+ if (hcrx_el2 == HCRX_EL2_INIT_VAL) {
+ /*
+ * If the value of the register is the reset value, the test
+ * passed.
+ */
+ return TEST_RESULT_SUCCESS;
+ }
+ /*
+ * Otherwise, the test fails, as the HCRX_EL2 register has
+ * not been initialized properly.
+ */
+ return TEST_RESULT_FAIL;
+#else
+ /* Skip test if AArch32 */
+ return TEST_RESULT_SKIPPED;
+#endif
+}
diff --git a/tftf/tests/extensions/mpam/test_mpam.c b/tftf/tests/extensions/mpam/test_mpam.c
new file mode 100644
index 000000000..eb40bc5ca
--- /dev/null
+++ b/tftf/tests/extensions/mpam/test_mpam.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+/* EL3 is expected to allow access to MPAM system registers from EL2.
+ * Reading these registers will trap to EL3 and crash when EL3 has not
+ * allowed access.
+ */
+
+test_result_t test_mpam_reg_access(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_MPAM_NOT_SUPPORTED();
+
+ read_mpamidr_el1();
+ read_mpam2_el2();
+
+ return TEST_RESULT_SUCCESS;
+#endif
+}
diff --git a/tftf/tests/extensions/pauth/test_pauth.c b/tftf/tests/extensions/pauth/test_pauth.c
index 30b78ef19..ada2f1d70 100644
--- a/tftf/tests/extensions/pauth/test_pauth.c
+++ b/tftf/tests/extensions/pauth/test_pauth.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,155 +14,9 @@
#include <string.h>
#ifdef __aarch64__
-
-/* Number of ARMv8.3-PAuth keys */
-#define NUM_KEYS 5
-
-static const char * const key_name[] = {"IA", "IB", "DA", "DB", "GA"};
-
static uint128_t pauth_keys_before[NUM_KEYS];
static uint128_t pauth_keys_after[NUM_KEYS];
-
-/* Check if ARMv8.3-PAuth key is enabled */
-static bool is_pauth_key_enabled(uint64_t key_bit)
-{
- return (IS_IN_EL2() ?
- ((read_sctlr_el2() & key_bit) != 0U) :
- ((read_sctlr_el1() & key_bit) != 0U));
-}
-
-static test_result_t compare_pauth_keys(void)
-{
- test_result_t result = TEST_RESULT_SUCCESS;
-
- for (unsigned int i = 0; i < NUM_KEYS; ++i) {
- if (pauth_keys_before[i] != pauth_keys_after[i]) {
- ERROR("AP%sKey_EL1 read 0x%llx:%llx "
- "expected 0x%llx:%llx\n", key_name[i],
- (uint64_t)(pauth_keys_after[i] >> 64),
- (uint64_t)(pauth_keys_after[i]),
- (uint64_t)(pauth_keys_before[i] >> 64),
- (uint64_t)(pauth_keys_before[i]));
-
- result = TEST_RESULT_FAIL;
- }
- }
- return result;
-}
-
-/*
- * Program or read ARMv8.3-PAuth keys (if already enabled)
- * and store them in <pauth_keys_before> buffer
- */
-static void set_store_pauth_keys(void)
-{
- uint128_t plat_key;
-
- memset(pauth_keys_before, 0, NUM_KEYS * sizeof(uint128_t));
-
- if (is_armv8_3_pauth_apa_api_present()) {
- if (is_pauth_key_enabled(SCTLR_EnIA_BIT)) {
- /* Read APIAKey_EL1 */
- plat_key = read_apiakeylo_el1() |
- ((uint128_t)(read_apiakeyhi_el1()) << 64);
- INFO("EnIA is set\n");
- } else {
- /* Program APIAKey_EL1 */
- plat_key = init_apkey();
- write_apiakeylo_el1((uint64_t)plat_key);
- write_apiakeyhi_el1((uint64_t)(plat_key >> 64));
- }
- pauth_keys_before[0] = plat_key;
-
- if (is_pauth_key_enabled(SCTLR_EnIB_BIT)) {
- /* Read APIBKey_EL1 */
- plat_key = read_apibkeylo_el1() |
- ((uint128_t)(read_apibkeyhi_el1()) << 64);
- INFO("EnIB is set\n");
- } else {
- /* Program APIBKey_EL1 */
- plat_key = init_apkey();
- write_apibkeylo_el1((uint64_t)plat_key);
- write_apibkeyhi_el1((uint64_t)(plat_key >> 64));
- }
- pauth_keys_before[1] = plat_key;
-
- if (is_pauth_key_enabled(SCTLR_EnDA_BIT)) {
- /* Read APDAKey_EL1 */
- plat_key = read_apdakeylo_el1() |
- ((uint128_t)(read_apdakeyhi_el1()) << 64);
- INFO("EnDA is set\n");
- } else {
- /* Program APDAKey_EL1 */
- plat_key = init_apkey();
- write_apdakeylo_el1((uint64_t)plat_key);
- write_apdakeyhi_el1((uint64_t)(plat_key >> 64));
- }
- pauth_keys_before[2] = plat_key;
-
- if (is_pauth_key_enabled(SCTLR_EnDB_BIT)) {
- /* Read APDBKey_EL1 */
- plat_key = read_apdbkeylo_el1() |
- ((uint128_t)(read_apdbkeyhi_el1()) << 64);
- INFO("EnDB is set\n");
- } else {
- /* Program APDBKey_EL1 */
- plat_key = init_apkey();
- write_apdbkeylo_el1((uint64_t)plat_key);
- write_apdbkeyhi_el1((uint64_t)(plat_key >> 64));
- }
- pauth_keys_before[3] = plat_key;
- }
-
- /*
- * It is safe to assume that Generic Pointer authentication code key
- * APGAKey_EL1 can be re-programmed, as this key is not set in
- * TF-A Test suite and PACGA instruction is not used.
- */
- if (is_armv8_3_pauth_gpa_gpi_present()) {
- /* Program APGAKey_EL1 */
- plat_key = init_apkey();
- write_apgakeylo_el1((uint64_t)plat_key);
- write_apgakeyhi_el1((uint64_t)(plat_key >> 64));
- pauth_keys_before[4] = plat_key;
- }
-
- isb();
-}
-
-/*
- * Read ARMv8.3-PAuth keys and store them in
- * <pauth_keys_after> buffer
- */
-static void read_pauth_keys(void)
-{
- memset(pauth_keys_after, 0, NUM_KEYS * sizeof(uint128_t));
-
- if (is_armv8_3_pauth_apa_api_present()) {
- /* Read APIAKey_EL1 */
- pauth_keys_after[0] = read_apiakeylo_el1() |
- ((uint128_t)(read_apiakeyhi_el1()) << 64);
-
- /* Read APIBKey_EL1 */
- pauth_keys_after[1] = read_apibkeylo_el1() |
- ((uint128_t)(read_apibkeyhi_el1()) << 64);
-
- /* Read APDAKey_EL1 */
- pauth_keys_after[2] = read_apdakeylo_el1() |
- ((uint128_t)(read_apdakeyhi_el1()) << 64);
-
- /* Read APDBKey_EL1 */
- pauth_keys_after[3] = read_apdbkeylo_el1() |
- ((uint128_t)(read_apdbkeyhi_el1()) << 64);
- }
-
- if (is_armv8_3_pauth_gpa_gpi_present()) {
- /* Read APGAKey_EL1 */
- pauth_keys_after[4] = read_apgakeylo_el1() |
- ((uint128_t)(read_apgakeyhi_el1()) << 64);
- }
-}
-#endif /* __aarch64__ */
+#endif
/*
* TF-A is expected to allow access to key registers from lower EL's,
@@ -174,7 +28,7 @@ test_result_t test_pauth_reg_access(void)
SKIP_TEST_IF_AARCH32();
#ifdef __aarch64__
SKIP_TEST_IF_PAUTH_NOT_SUPPORTED();
- read_pauth_keys();
+ pauth_test_lib_read_keys(pauth_keys_before);
return TEST_RESULT_SUCCESS;
#endif /* __aarch64__ */
}
@@ -188,13 +42,11 @@ test_result_t test_pauth_leakage(void)
SKIP_TEST_IF_AARCH32();
#ifdef __aarch64__
SKIP_TEST_IF_PAUTH_NOT_SUPPORTED();
- set_store_pauth_keys();
+ pauth_test_lib_read_keys(pauth_keys_before);
tftf_get_psci_version();
- read_pauth_keys();
-
- return compare_pauth_keys();
+ return pauth_test_lib_compare_template(pauth_keys_before, pauth_keys_after);
#endif /* __aarch64__ */
}
@@ -220,7 +72,6 @@ test_result_t test_pauth_instructions(void)
ARM_ARCH_MAJOR, ARM_ARCH_MINOR);
return TEST_RESULT_SKIPPED;
#endif /* ARM_ARCH_AT_LEAST(8, 3) */
-
#endif /* __aarch64__ */
}
@@ -238,7 +89,7 @@ test_result_t test_pauth_leakage_tsp(void)
SKIP_TEST_IF_PAUTH_NOT_SUPPORTED();
SKIP_TEST_IF_TSP_NOT_PRESENT();
- set_store_pauth_keys();
+ pauth_test_lib_fill_regs_and_template(pauth_keys_before);
/* Standard SMC to ADD two numbers */
tsp_svc_params.fid = TSP_STD_FID(TSP_ADD);
@@ -260,8 +111,6 @@ test_result_t test_pauth_leakage_tsp(void)
return TEST_RESULT_FAIL;
}
- read_pauth_keys();
-
- return compare_pauth_keys();
+ return pauth_test_lib_compare_template(pauth_keys_before, pauth_keys_after);
#endif /* __aarch64__ */
}
diff --git a/tftf/tests/extensions/pmuv3/test_pmuv3.c b/tftf/tests/extensions/pmuv3/test_pmuv3.c
new file mode 100644
index 000000000..725b4e716
--- /dev/null
+++ b/tftf/tests/extensions/pmuv3/test_pmuv3.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <test_helpers.h>
+
+/* tests target aarch64. Aarch32 is too different to even build */
+#if defined(__aarch64__)
+
+#define PMU_EVT_INST_RETIRED 0x0008
+#define NOP_REPETITIONS 50
+#define MAX_COUNTERS 32
+
+static inline void read_all_counters(u_register_t *array, int impl_ev_ctrs)
+{
+ array[0] = read_pmccntr_el0();
+ for (int i = 0; i < impl_ev_ctrs; i++) {
+ array[i + 1] = read_pmevcntrn_el0(i);
+ }
+}
+
+static inline void read_all_counter_configs(u_register_t *array, int impl_ev_ctrs)
+{
+ array[0] = read_pmccfiltr_el0();
+ for (int i = 0; i < impl_ev_ctrs; i++) {
+ array[i + 1] = read_pmevtypern_el0(i);
+ }
+}
+
+static inline void read_all_pmu_configs(u_register_t *array)
+{
+ array[0] = read_pmcntenset_el0();
+ array[1] = read_pmcr_el0();
+ array[2] = read_pmselr_el0();
+ array[3] = (IS_IN_EL2()) ? read_mdcr_el2() : 0;
+}
+
+static inline void enable_counting(void)
+{
+ write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
+ /* this function means we are about to use the PMU, synchronize */
+ isb();
+}
+
+static inline void disable_counting(void)
+{
+ write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_E_BIT);
+ /* we also rely that disabling really did work */
+ isb();
+}
+
+static inline void clear_counters(void)
+{
+ write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);
+}
+
+/*
+ * tftf runs in EL2, don't bother enabling counting at lower ELs and secure
+ * world. TF-A has other controls for them and counting there doesn't impact us
+ */
+static inline void enable_cycle_counter(void)
+{
+ write_pmccfiltr_el0(PMCCFILTR_EL0_NSH_BIT);
+ write_pmcntenset_el0(read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);
+}
+
+static inline void enable_event_counter(int ctr_num)
+{
+ write_pmevtypern_el0(ctr_num, PMEVTYPER_EL0_NSH_BIT |
+ (PMU_EVT_INST_RETIRED & PMEVTYPER_EL0_EVTCOUNT_BITS));
+ write_pmcntenset_el0(read_pmcntenset_el0() |
+ PMCNTENSET_EL0_P_BIT(ctr_num));
+}
+
+/* doesn't really matter what happens, as long as it happens a lot */
+static inline void execute_nops(void)
+{
+ for (int i = 0; i < NOP_REPETITIONS; i++) {
+ __asm__ ("orr x0, x0, x0\n");
+ }
+}
+
+static inline void execute_el3_nop(void)
+{
+ /* ask EL3 for some info, no side effects */
+ smc_args args = { SMCCC_VERSION };
+
+ /* return values don't matter */
+ tftf_smc(&args);
+}
+
+#endif /* defined(__aarch64__) */
+
+/*
+ * try the cycle counter with some NOPs to see if it works
+ */
+test_result_t test_pmuv3_cycle_works_ns(void)
+{
+ SKIP_TEST_IF_AARCH32();
+#if defined(__aarch64__)
+ u_register_t ccounter_start;
+ u_register_t ccounter_end;
+
+ SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
+
+ enable_cycle_counter();
+ enable_counting();
+
+ ccounter_start = read_pmccntr_el0();
+ execute_nops();
+ ccounter_end = read_pmccntr_el0();
+ disable_counting();
+ clear_counters();
+
+ tftf_testcase_printf("Counted from %ld to %ld\n",
+ ccounter_start, ccounter_end);
+ if (ccounter_start != ccounter_end) {
+ return TEST_RESULT_SUCCESS;
+ }
+ return TEST_RESULT_FAIL;
+#endif /* defined(__aarch64__) */
+}
+
+/*
+ * try an event counter with some NOPs to see if it works. MDCR_EL2.HPMN can
+ * make this tricky so take extra care.
+ */
+test_result_t test_pmuv3_event_works_ns(void)
+{
+ SKIP_TEST_IF_AARCH32();
+#if defined(__aarch64__)
+ u_register_t evcounter_start;
+ u_register_t evcounter_end;
+ u_register_t mdcr_el2 = ~0;
+
+ SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
+
+ /* use the real value or use the dummy value to skip checks later */
+ if (IS_IN_EL2()) {
+ mdcr_el2 = read_mdcr_el2();
+ }
+
+ if (((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK) == 0) {
+ tftf_testcase_printf("No event counters implemented\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* FEAT_HPMN0 only affects event counters */
+ if ((mdcr_el2 & MDCR_EL2_HPMN_MASK) == 0) {
+ if (!get_feat_hpmn0_supported()) {
+ tftf_testcase_printf(
+ "FEAT_HPMN0 not implemented but HPMN is 0\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* the test will fail in this case */
+ if ((mdcr_el2 & MDCR_EL2_HPME_BIT) == 0) {
+ tftf_testcase_printf(
+ "HPMN is 0 and HPME is not set!\n");
+ }
+ }
+
+ enable_event_counter(0);
+ enable_counting();
+
+ /*
+ * if any are enabled it will be the very first one. HPME can disable
+ * the higher end of the counters and HPMN can put the boundary
+ * anywhere
+ */
+ evcounter_start = read_pmevcntrn_el0(0);
+ execute_nops();
+ evcounter_end = read_pmevcntrn_el0(0);
+ disable_counting();
+ clear_counters();
+
+ tftf_testcase_printf("Counted from %ld to %ld\n",
+ evcounter_start, evcounter_end);
+ if (evcounter_start != evcounter_end) {
+ return TEST_RESULT_SUCCESS;
+ }
+ return TEST_RESULT_FAIL;
+#endif /* defined(__aarch64__) */
+}
+
+
+/*
+ * check if entering/exiting EL3 (with a NOP) preserves all PMU registers.
+ */
+test_result_t test_pmuv3_el3_preserves(void)
+{
+ SKIP_TEST_IF_AARCH32();
+#if defined(__aarch64__)
+ u_register_t ctr_start[MAX_COUNTERS] = {0};
+ u_register_t ctr_cfg_start[MAX_COUNTERS] = {0};
+ u_register_t pmu_cfg_start[4];
+ u_register_t ctr_end[MAX_COUNTERS] = {0};
+ u_register_t ctr_cfg_end[MAX_COUNTERS] = {0};
+ u_register_t pmu_cfg_end[4];
+ int impl_ev_ctrs;
+
+ SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
+
+ impl_ev_ctrs = (read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK;
+
+ /* start from 0 so we know we can't overflow */
+ clear_counters();
+ /* pretend counters have just been used */
+ enable_cycle_counter();
+ enable_event_counter(0);
+ enable_counting();
+ execute_nops();
+ disable_counting();
+
+ /* get before reading */
+ read_all_counters(ctr_start, impl_ev_ctrs);
+ read_all_counter_configs(ctr_cfg_start, impl_ev_ctrs);
+ read_all_pmu_configs(pmu_cfg_start);
+
+ /* give EL3 a chance to scramble everything */
+ execute_el3_nop();
+
+ /* get after reading */
+ read_all_counters(ctr_end, impl_ev_ctrs);
+ read_all_counter_configs(ctr_cfg_end, impl_ev_ctrs);
+ read_all_pmu_configs(pmu_cfg_end);
+
+ if (memcmp(ctr_start, ctr_end, sizeof(ctr_start)) != 0) {
+ tftf_testcase_printf("SMC call did not preserve counters\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (memcmp(ctr_cfg_start, ctr_cfg_end, sizeof(ctr_cfg_start)) != 0) {
+ tftf_testcase_printf("SMC call did not preserve counter config\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (memcmp(pmu_cfg_start, pmu_cfg_end, sizeof(pmu_cfg_start)) != 0) {
+ tftf_testcase_printf("SMC call did not preserve PMU registers\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+#endif /* defined(__aarch64__) */
+}
diff --git a/tftf/tests/extensions/rng_trap/test_rng_trap.c b/tftf/tests/extensions/rng_trap/test_rng_trap.c
new file mode 100644
index 000000000..49ee6adfc
--- /dev/null
+++ b/tftf/tests/extensions/rng_trap/test_rng_trap.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <test_helpers.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+#define MAX_ITERATIONS_EXCLUSIVE 100
+
+/*
+ * This test ensures that RNDR/RNDRRS instructions cause a trap to EL3 and
+ * generate a random number each time.
+ * Argument "use_rndrrs" decides whether to execute "rndrrs" or "rndr" instruction.
+ *
+ * This test uses load/store exclusive pairs to detect whether the execution
+ * trapped to EL3 or not.
+ * It relies on the fact that when exception level changes the monitor is cleared.
+ * In a load/store exclusive pair with stxr instruction, the status gets updated
+ * to '1' when monitor is cleared.
+ * In this test we start with ldxr and execute the trapping instruction; if the trap
+ * to EL3 happened then the stxr status will be '1'. To avoid the chance of the monitor
+ * being cleared (highly unlikely in this scenario) for any other reason, do this test iteratively.
+ * If stxr succeeds even a single time we are sure that the trap did not happen.
+ */
+static test_result_t test_rng_trap(bool use_rndrrs)
+{
+#if defined __aarch64__
+ u_register_t rng, rng1 = 0;
+ u_register_t exclusive;
+ u_register_t status;
+ unsigned int i;
+
+ /* Make sure FEAT_RNG_TRAP is supported. */
+ SKIP_TEST_IF_RNG_TRAP_NOT_SUPPORTED();
+
+ /*
+ * The test was inserted in a loop that runs a safe number of times
+ * in order to discard any possible trap returns other than RNG_TRAP
+ */
+ for (i = 0; i < MAX_ITERATIONS_EXCLUSIVE; i++) {
+ /* Attempt to acquire address for exclusive access */
+ __asm__ volatile ("ldxr %0, %1\n" : "=r"(rng)
+ : "Q"(exclusive));
+ if (use_rndrrs) {
+ /* Attempt to read RNDRRS. */
+ __asm__ volatile ("mrs %0, rndrrs\n" : "=r" (rng));
+ } else {
+ /* Attempt to read RNDR. */
+ __asm__ volatile ("mrs %0, rndr\n" : "=r" (rng));
+ }
+ /*
+ * After returning from the trap, the monitor variable should
+ * be cleared, so the status value should be 1.
+ */
+ __asm__ volatile ("stxr %w0, %1, %2\n" : "=&r"(status)
+ : "r"(rng), "Q"(exclusive));
+ /* If monitor is not cleared or not a new random number */
+ if ((status == 0) || (rng == rng1)) {
+ return TEST_RESULT_FAIL;
+ }
+ rng1 = rng;
+ }
+
+ return TEST_RESULT_SUCCESS;
+#else
+ /* Skip test if AArch32 */
+ SKIP_TEST_IF_AARCH32();
+#endif
+}
+
+/* Test RNDR read access causes a trap to EL3 and generates a random number each time */
+test_result_t test_rndr_rng_trap(void)
+{
+ return test_rng_trap(false);
+}
+
+/* Test RNDRRS read access causes a trap to EL3 and generates a random number each time */
+test_result_t test_rndrrs_rng_trap(void)
+{
+ return test_rng_trap(true);
+}
diff --git a/tftf/tests/extensions/sme/test_sme.c b/tftf/tests/extensions/sme/test_sme.c
new file mode 100644
index 000000000..39c64571b
--- /dev/null
+++ b/tftf/tests/extensions/sme/test_sme.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <lib/extensions/sme.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+
+/* Global buffers */
+static __aligned(16) uint64_t ZA_In_vector[8] = {0xaaff, 0xbbff, 0xccff, 0xddff, 0xeeff,
+ 0xffff, 0xff00, 0xff00};
+static __aligned(16) uint64_t ZA_Out_vector[8] = {0};
+
+/**
+ * sme_zero_ZA
+ * ZERO : Zero a list of up to eight 64-bit element ZA tiles.
+ * ZERO {<mask>} , where mask=ff, to clear all the 8, 64 bit elements.
+ */
+static void sme_zero_ZA(void)
+{
+ /**
+ * Due to the lack of support from the toolchain, instruction
+ * opcodes are used here.
+ * Manual Encoding Instruction, to Zero all the tiles of ZA array.
+ *
+ * TODO: Further, once the toolchain adds support for SME features
+ * this could be replaced with the actual instruction ZERO { <mask>}.
+ */
+ asm volatile(".inst 0xc008000f" : : : );
+}
+
+/**
+ * This function compares two buffers/vector elements
+ * Inputs: uint64_t *ZA_In_vector, ZA_Out_vector
+ * @return true : If both are equal
+ * @return false : If both are not equal
+ */
+static bool sme_cmp_vector(const uint64_t *ZA_In_vector, const uint64_t *ZA_Out_vector)
+{
+ bool ret = true;
+
+ for (int i = 0; i < (MAX_VL_B/8); i++) {
+ if (ZA_In_vector[i] != ZA_Out_vector[i]) {
+ ret = false;
+ }
+ }
+
+ return ret;
+}
+
+#endif /* __aarch64__ */
+
+test_result_t test_sme_support(void)
+{
+ /* SME is an AArch64-only feature.*/
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ u_register_t reg;
+ unsigned int current_vector_len;
+ unsigned int requested_vector_len;
+ unsigned int len_max;
+ unsigned int __unused svl_max = 0U;
+ u_register_t saved_smcr;
+
+ /* Skip the test if SME is not supported. */
+ SKIP_TEST_IF_SME_NOT_SUPPORTED();
+
+ /* Make sure TPIDR2_EL0 is accessible. */
+ write_tpidr2_el0(0);
+ if (read_tpidr2_el0() != 0) {
+ ERROR("Could not read TPIDR2_EL0.\n");
+ return TEST_RESULT_FAIL;
+ }
+ write_tpidr2_el0(0xb0bafe77);
+ if (read_tpidr2_el0() != 0xb0bafe77) {
+ ERROR("Could not write TPIDR2_EL0.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Iterate through values for LEN to detect supported vector lengths.
+ */
+
+ /* Entering Streaming SVE mode */
+ sme_smstart(SMSTART_SM);
+
+ saved_smcr = read_smcr_el2();
+
+ /* Write SMCR_EL2 with the LEN max to find implemented width. */
+ write_smcr_el2(MASK(SMCR_ELX_RAZ_LEN));
+ isb();
+
+ len_max = (unsigned int)read_smcr_el2();
+ VERBOSE("Maximum SMCR_EL2.LEN value: 0x%x\n", len_max);
+ VERBOSE("Enumerating supported vector lengths...\n");
+ for (unsigned int i = 0; i <= len_max; i++) {
+ /* Load new value into SMCR_EL2.RAZ_LEN */
+ reg = read_smcr_el2();
+ reg &= ~(MASK(SMCR_ELX_RAZ_LEN));
+ reg |= INPLACE(SMCR_ELX_RAZ_LEN, i);
+ write_smcr_el2(reg);
+ isb();
+
+ /* Compute current and requested vector lengths in bits. */
+ current_vector_len = ((unsigned int)sme_rdsvl_1() * 8U);
+ requested_vector_len = (i + 1U) * 128U;
+
+ /*
+ * We count down from the maximum SMLEN value, so if the values
+ * match, we've found the largest supported value for SMLEN.
+ */
+ if (current_vector_len == requested_vector_len) {
+ svl_max = current_vector_len;
+ VERBOSE("SUPPORTED: %u bits (LEN=%u)\n",
+ requested_vector_len, i);
+ } else {
+ VERBOSE("NOT SUPPORTED: %u bits (LEN=%u)\n",
+ requested_vector_len, i);
+ }
+ }
+
+ INFO("Largest Supported Streaming Vector Length(SVL): %u bits\n",
+ svl_max);
+
+ /* Exiting Streaming SVE mode */
+ sme_smstop(SMSTOP_SM);
+
+ /**
+ * Perform/Execute SME Instructions.
+ * SME Data processing instructions LDR, STR, and ZERO instructions that
+ * access the SME ZA storage are legal only if ZA is enabled.
+ */
+
+ /* Enable SME ZA Array Storage */
+ sme_smstart(SMSTART_ZA);
+
+ /* LDR : Load vector to ZA Array */
+ sme_vector_to_ZA(ZA_In_vector);
+
+ /* STR : Store Vector from ZA Array. */
+ sme_ZA_to_vector(ZA_Out_vector);
+
+ /* Compare both vectors to ensure load and store instructions have
+ * executed precisely.
+ */
+ if (!sme_cmp_vector(ZA_In_vector, ZA_Out_vector)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Zero or clear the entire ZA Array Storage/Tile */
+ sme_zero_ZA();
+
+ /* Disable the SME ZA array storage. */
+ sme_smstop(SMSTOP_ZA);
+
+ /* If FEAT_SME_FA64 then attempt to execute an illegal instruction. */
+ if (is_feat_sme_fa64_supported()) {
+ VERBOSE("FA64 supported, trying illegal instruction.\n");
+ sme_try_illegal_instruction();
+ }
+
+ write_smcr_el2(saved_smcr);
+ isb();
+
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/sme/test_sme2.c b/tftf/tests/extensions/sme/test_sme2.c
new file mode 100644
index 000000000..e82da08ee
--- /dev/null
+++ b/tftf/tests/extensions/sme/test_sme2.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <lib/extensions/sme.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+
+#define SME2_ARRAYSIZE (512/64)
+#define SME2_INPUT_DATA (0x1fffffffffffffff)
+
+/* Global buffers */
+static __aligned(16) uint64_t sme2_input_buffer[SME2_ARRAYSIZE] = {0};
+static __aligned(16) uint64_t sme2_output_buffer[SME2_ARRAYSIZE] = {0};
+
+/*
+ * clear_ZT0: ZERO all bytes of the ZT0 register.
+ *
+ */
+static void clear_ZT0(void)
+{
+ /**
+ * Due to the lack of support from the toolchain, instruction
+ * opcodes are used here.
+ * TODO: Further, once the toolchain adds support for SME features
+ * this could be replaced with the instruction ZERO {ZT0}.
+ */
+ asm volatile(".inst 0xc0480001" : : : );
+}
+
+#endif /* __aarch64__ */
+
+/*
+ * test_sme2_support: Test SME2 support when the extension is enabled.
+ *
+ * Execute some SME2 instructions. These should not be trapped to EL3,
+ * as TF-A is responsible for enabling SME2 for Non-secure world.
+ *
+ */
+test_result_t test_sme2_support(void)
+{
+ /* SME2 is an AArch64-only feature.*/
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ /* Skip the test if SME2 is not supported. */
+ SKIP_TEST_IF_SME2_NOT_SUPPORTED();
+
+ /*
+ * FEAT_SME2 adds a 512 BIT architectural register ZT0 to support
+ * the lookup-table feature.
+ * System register SMCR_ELx defines a bit SMCR_ELx.EZT0 bit [30] to
+ * enable/disable access to this register. SMCR_EL2_RESET_VAL enables
+ * this bit by default.
+ *
+ * Instructions to access ZT0 register are being tested to ensure
+ * SMCR_EL3.EZT0 bit is set by EL3 firmware so that EL2 access are not
+ * trapped.
+ */
+
+ /* Make sure we can access SME2 ZT0 storage, PSTATE.ZA = 1 */
+ VERBOSE("Enabling SME ZA storage and ZT0 storage.\n");
+
+ sme_smstart(SMSTART_ZA);
+
+ /*
+ * LDR (ZT0) : Load ZT0 register.
+ * Load the 64-byte ZT0 register from the memory address
+ * provided in the 64-bit scalar base register.
+ */
+ for (int i = 0; i < SME2_ARRAYSIZE; i++) {
+ sme2_input_buffer[i] = SME2_INPUT_DATA;
+ }
+ sme2_load_zt0_instruction(sme2_input_buffer);
+
+ /*
+ * STR (ZT0) : Store ZT0 register.
+ * Store the 64-byte ZT0 register to the memory address
+ * provided in the 64-bit scalar base register
+ */
+
+ sme2_store_zt0_instruction(sme2_output_buffer);
+
+ /**
+ * compare the input and output buffer to verify the operations of
+ * LDR and STR instructions with ZT0 register.
+ */
+ for (int i = 0; i < SME2_ARRAYSIZE; i++) {
+ if (sme2_input_buffer[i] != sme2_output_buffer[i]) {
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* ZERO (ZT0) */
+ clear_ZT0();
+
+ /* Finally disable the access to SME2 ZT0 storage, PSTATE.ZA = 0 */
+ VERBOSE("Disabling SME ZA storage and ZT0 storage.\n");
+
+ sme_smstop(SMSTOP_ZA);
+
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/spe/test_spe.c b/tftf/tests/extensions/spe/test_spe.c
new file mode 100644
index 000000000..d0d89ef50
--- /dev/null
+++ b/tftf/tests/extensions/spe/test_spe.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <test_helpers.h>
+
+test_result_t test_spe_support(void)
+{
+ /* SPE is an AArch64-only feature.*/
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ unsigned int spe_ver = spe_get_version();
+
+ assert(spe_ver <= ID_AA64DFR0_SPE_V1P4);
+
+ if (spe_ver == ID_AA64DFR0_SPE_NOT_SUPPORTED) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /*
+ * If runtime-EL3 does not enable access to SPE system
+ * registers from NS-EL2/NS-EL1 then read of these
+ * registers traps in EL3
+ */
+ read_pmscr_el1();
+ read_pmsfcr_el1();
+ read_pmsicr_el1();
+ read_pmsidr_el1();
+ read_pmsirr_el1();
+ read_pmslatfr_el1();
+ read_pmblimitr_el1();
+ read_pmbptr_el1();
+ read_pmbsr_el1();
+ read_pmsevfr_el1();
+ if (IS_IN_EL2()) {
+ read_pmscr_el2();
+ }
+ if (spe_ver == ID_AA64DFR0_SPE_V1P2) {
+ read_pmsnevfr_el1();
+ }
+
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/sve/sve_operations.S b/tftf/tests/extensions/sve/sve_operations.S
deleted file mode 100644
index e528b2bfe..000000000
--- a/tftf/tests/extensions/sve/sve_operations.S
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-
-#include "./test_sve.h"
-
-#ifdef __aarch64__
-#if __GNUC__ > 8 || (__GNUC__ == 8 && __GNUC_MINOR__ > 0)
-
-/*
- * Based on example code from the Arm Compiler Scalable Vector Extension User
- * Guide[1].
- * [1] https://developer.arm.com/docs/100891/latest/getting-started-with-the-sve-compiler/compiling-c-and-c-code-for-sve-enabled-targets
- */
-
- .arch armv8.2-a+crc+fp16+sve
- .global sve_subtract_arrays
-func sve_subtract_arrays
- mov x4, SVE_ARRAYSIZE
- mov x5, x4
- mov x3, 0
- whilelo p0.s, xzr, x4
-.loop:
- ld1w z0.s, p0/z, [x1, x3, lsl 2]
- ld1w z1.s, p0/z, [x2, x3, lsl 2]
- sub z0.s, z0.s, z1.s
- st1w z0.s, p0, [x0, x3, lsl 2]
- incw x3
- whilelo p0.s, x3, x5
- bne .loop
- ret
-endfunc sve_subtract_arrays
-
-#endif /* __GNUC__ > 8 || (__GNUC__ == 8 && __GNUC_MINOR__ > 0) */
-#endif /* __aarch64__ */
diff --git a/tftf/tests/extensions/sve/test_sve.c b/tftf/tests/extensions/sve/test_sve.c
index 235e2b8e3..bdd76e180 100644
--- a/tftf/tests/extensions/sve/test_sve.c
+++ b/tftf/tests/extensions/sve/test_sve.c
@@ -8,15 +8,14 @@
#include <arch_helpers.h>
#include <debug.h>
#include <stdlib.h>
+#include <test_helpers.h>
#include <tftf_lib.h>
+#include <lib/extensions/sve.h>
#include "./test_sve.h"
#if __GNUC__ > 8 || (__GNUC__ == 8 && __GNUC_MINOR__ > 0)
-extern void sve_subtract_arrays(int *difference, const int *sve_op_1,
- const int *sve_op_2);
-
static int sve_difference[SVE_ARRAYSIZE];
static int sve_op_1[SVE_ARRAYSIZE];
static int sve_op_2[SVE_ARRAYSIZE];
@@ -32,11 +31,7 @@ static int sve_op_2[SVE_ARRAYSIZE];
*/
test_result_t test_sve_support(void)
{
- /* Check if SVE is implemented and usable */
- if (is_armv8_2_sve_present() == false) {
- tftf_testcase_printf("SVE support absent\n");
- return TEST_RESULT_SKIPPED;
- }
+ SKIP_TEST_IF_SVE_NOT_SUPPORTED();
for (int i = 0; i < SVE_ARRAYSIZE; i++) {
/* Generate a random number between 200 and 299 */
@@ -46,7 +41,7 @@ test_result_t test_sve_support(void)
}
/* Perform SVE operations */
- sve_subtract_arrays(sve_difference, sve_op_1, sve_op_2);
+ sve_subtract_arrays(sve_difference, sve_op_1, sve_op_2, SVE_ARRAYSIZE);
return TEST_RESULT_SUCCESS;
}
diff --git a/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.c b/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.c
new file mode 100644
index 000000000..6c28c8718
--- /dev/null
+++ b/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+#include "./test_sys_reg_trace.h"
+
+static uint32_t get_trace_arch_ver(void)
+{
+ uint32_t val = read_trcdevarch();
+ val = (val >> TRCDEVARCH_ARCHVER_SHIFT) & TRCDEVARCH_ARCHVER_MASK;
+
+ return val;
+}
+
+/*
+ * EL3 is expected to allow access to trace system registers from EL2.
+ * Reading these registers will trap to EL3 and crash when EL3 has not
+ * allowed access.
+ */
+test_result_t test_sys_reg_trace_enabled(void)
+{
+ SKIP_TEST_IF_SYS_REG_TRACE_NOT_SUPPORTED();
+
+ /*
+ * Read a few ETMv4 system trace registers to verify that correct access
+ * has been provided by EL3.
+ */
+ uint32_t trace_arch_ver __unused = get_trace_arch_ver();
+ read_trcauxctlr();
+ read_trcccctlr();
+ read_trcbbctlr();
+ read_trcclaimset();
+ read_trcclaimclr();
+
+ /*
+ * Read a few ETE system trace registers to verify that correct access
+ * has been provided by EL3. ETE system trace register accesses are
+ * not possible from NS-EL2 in the aarch32 state.
+ */
+#if __aarch64__
+ if (trace_arch_ver == TRCDEVARCH_ARCHVER_ETE) {
+ read_trcrsr();
+ read_trcextinselr0();
+ read_trcextinselr1();
+ read_trcextinselr2();
+ read_trcextinselr3();
+ }
+#endif /* __aarch64__ */
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.h b/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.h
new file mode 100644
index 000000000..640b82c28
--- /dev/null
+++ b/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_SYS_REG_TRACE_H
+#define TEST_SYS_REG_TRACE_H
+
+/* TRCEDEVARCH definitions */
+#define TRCDEVARCH_ARCHVER_SHIFT U(12)
+#define TRCDEVARCH_ARCHVER_MASK U(0xf)
+#define TRCDEVARCH_ARCHVER_ETE U(0x5)
+
+#endif /* TEST_SYS_REG_TRACE_H */
diff --git a/tftf/tests/extensions/trbe/test_trbe.c b/tftf/tests/extensions/trbe/test_trbe.c
new file mode 100644
index 000000000..8ef9576e9
--- /dev/null
+++ b/tftf/tests/extensions/trbe/test_trbe.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+/*
+ * EL3 is expected to allow access to trace control registers from EL2.
+ * Reading these registers will trap to EL3 and crash when EL3 has not
+ * allowed access.
+ */
+test_result_t test_trbe_enabled(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_TRBE_NOT_SUPPORTED();
+ read_trblimitr_el1();
+ read_trbptr_el1();
+ read_trbbaser_el1();
+ read_trbsr_el1();
+ read_trbmar_el1();
+ read_trbtrg_el1();
+ read_trbidr_el1();
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/trf/test_trf.c b/tftf/tests/extensions/trf/test_trf.c
new file mode 100644
index 000000000..eeb967db8
--- /dev/null
+++ b/tftf/tests/extensions/trf/test_trf.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+/*
+ * EL3 is expected to allow access to trace filter control registers from EL2.
+ * Reading these registers will trap to EL3 and crash when EL3 has not
+ * allowed access.
+ */
+test_result_t test_trf_enabled(void)
+{
+ SKIP_TEST_IF_TRF_NOT_SUPPORTED();
+
+#ifdef __aarch64__
+ read_trfcr_el1();
+ read_trfcr_el2();
+#else
+ read_htrfcr();
+ read_trfcr();
+#endif /* __aarch64__ */
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/extensions/wfxt/test_wfxt.c b/tftf/tests/extensions/wfxt/test_wfxt.c
new file mode 100644
index 000000000..bb3e4866d
--- /dev/null
+++ b/tftf/tests/extensions/wfxt/test_wfxt.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+typedef enum {
+ EXEC_WFIT = 0,
+ EXEC_WFET
+} exec_wfxt;
+
+#ifdef __aarch64__
+static test_result_t test_wfxt_inst(exec_wfxt val, uint64_t ms)
+{
+ __asm__ volatile(".arch armv8.7-a");
+ uint64_t timer_cnt1, timer_cnt2, feed_cnt;
+ uint64_t timer_freq = read_cntfrq_el0();
+ uint64_t ms_to_counts = ((ms * timer_freq) / 1000U);
+
+ timer_cnt1 = virtualcounter_read();
+ feed_cnt = timer_cnt1 + ms_to_counts;
+
+ if (val == EXEC_WFIT) {
+ wfit(feed_cnt);
+ } else {
+ wfet(feed_cnt);
+ }
+
+ timer_cnt2 = virtualcounter_read();
+
+ /* Lapsed time should be at least equal to sleep time */
+ if ((timer_cnt2 - timer_cnt1) >= ms_to_counts) {
+ return TEST_RESULT_SUCCESS;
+ } else {
+ /* unlikely ends up here */
+ uint64_t lapsed_ms = ((timer_cnt2 - timer_cnt1) * 1000) / timer_freq;
+
+ ERROR("Time elapsed: actual(%llu)ms vs requested(%llu)ms \n",
+ lapsed_ms, ms);
+ return TEST_RESULT_FAIL;
+ }
+}
+#endif /* __aarch64__ */
+
+test_result_t test_wfet_instruction(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_WFXT_NOT_SUPPORTED();
+
+ /*
+ * first invocation of wfe returns immediately clearing the event
+ * register
+ */
+ sevl();
+ wfe();
+
+ return test_wfxt_inst(EXEC_WFET, 10);
+#endif /* __aarch64__ */
+}
+
+test_result_t test_wfit_instruction(void)
+{
+ test_result_t ret;
+
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_WFXT_NOT_SUPPORTED();
+
+ /* disable irqs to run wfi till timeout */
+ disable_irq();
+
+ ret = test_wfxt_inst(EXEC_WFIT, 10);
+
+ /* enable irq back */
+ enable_irq();
+#endif /* __aarch64__ */
+
+ return ret;
+}
diff --git a/tftf/tests/misc_tests/inject_serror.S b/tftf/tests/misc_tests/inject_ras_error.S
index d42441dd3..1798a9049 100644
--- a/tftf/tests/misc_tests/inject_serror.S
+++ b/tftf/tests/misc_tests/inject_ras_error.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,8 +10,8 @@
#ifdef __aarch64__
- .globl inject_serror
- .globl inject_uncontainable
+ .globl inject_unrecoverable_ras_error
+ .globl inject_uncontainable_ras_error
.globl serror_sdei_event_handler
/*
@@ -21,13 +21,8 @@
* x0: Fault record number to program
* x1: Injected fault properties
* x2: Type of error to be generated
- * x3: Memory location to wait for, or 0 if no waiting is required
*/
-func inject_serror_record
- /* Clear SError received flag if necessary */
- cbz x3, 1f
- str xzr, [x3, #0]
-1:
+func inject_ras_error_record
/* Choose Error record 0 on the PE */
msr ERRSELR_EL1, x0
isb
@@ -45,25 +40,11 @@ func inject_serror_record
msr ERXPFGCTL_EL1, x2
isb
- /* If no waiting is required, jump to end */
- cbz x3, 3f
-
- sevl
-
-2:
- wfe
- ldr x0, [x3, #0]
- cbz x0, 2b
-
-3:
ret
-endfunc inject_serror_record
+endfunc inject_ras_error_record
-/*
- * Inject Unrecoverable error through fault record 0. Wait until serror_received
- * is set by the SDEI handler in response to receving the event.
- */
-func inject_serror
+/* Inject Unrecoverable error through fault record 0. */
+func inject_unrecoverable_ras_error
/* Inject fault into record 0 */
mov x0, #0
@@ -74,18 +55,14 @@ func inject_serror
/* Injected fault control */
mov x2, #ERXPFGCTL_UEU_BIT
- /* Wait address */
- adrp x3, serror_received
- add x3, x3, :lo12:serror_received
-
- b inject_serror_record
-endfunc inject_serror
+ b inject_ras_error_record
+endfunc inject_unrecoverable_ras_error
/*
* Inject Uncontainable error through fault record 0. This function doesn't wait
* as the handling is terminal in EL3.
*/
-func inject_uncontainable
+func inject_uncontainable_ras_error
/* Inject fault into record 0 */
mov x0, #0
@@ -94,18 +71,15 @@ func inject_uncontainable
/* Injected fault control */
mov x2, #ERXPFGCTL_UC_BIT
- /* Nothing to wait for */
- mov x3, xzr
-
- b inject_serror_record
-endfunc inject_uncontainable
+ b inject_ras_error_record
+endfunc inject_uncontainable_ras_error
/*
* SDEI event handler for SErrors.
*/
func serror_sdei_event_handler
stp x29, x30, [sp, #-16]!
- bl serror_handler
+ bl sdei_handler
ldp x29, x30, [sp], #16
mov_imm x0, SDEI_EVENT_COMPLETE
mov x1, xzr
diff --git a/tftf/tests/misc_tests/test_ea_ffh.c b/tftf/tests/misc_tests/test_ea_ffh.c
new file mode 100644
index 000000000..911962e1a
--- /dev/null
+++ b/tftf/tests/misc_tests/test_ea_ffh.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <mmio.h>
+#include <tftf_lib.h>
+#include <smccc.h>
+#include <xlat_tables_v2.h>
+
+#define TEST_ADDRESS UL(0x7FFFF000)
+
+/*
+ * Purpose of these tests is to ensure EA from lower EL trap/handled in EL3.
+ *
+ * Tests HANDLE_EA_EL3_FIRST_NS feature(SCR_EL3.EA = 1) of TF-A
+ *
+ * Works in conjunction with PLATFORM_TEST_EA_FFH macro in TF-A.
+ */
+
+/*
+ * This test maps a non-existent memory as Device memory and reads it.
+ * Memory is mapped as device and cause an error on bus and trap as an Sync EA.
+ */
+test_result_t test_inject_syncEA(void)
+{
+ int rc;
+
+ rc = mmap_add_dynamic_region(TEST_ADDRESS, TEST_ADDRESS, PAGE_SIZE,
+ MT_DEVICE | MT_RO | MT_NS);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mapping address %lu(%d) failed\n",
+ __LINE__, TEST_ADDRESS, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Try reading an invalid address, which will cause an exception to be handled in EL3.
+ * EL3, after handling the exception, returns to the next instruction to avoid
+ * continuous exceptions.
+ */
+ rc = mmio_read_32(TEST_ADDRESS);
+
+ rc = mmap_remove_dynamic_region(TEST_ADDRESS, PAGE_SIZE);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_remove_dynamic_region() = %d\n", __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * This test maps a non-existent memory as Device memory and write to it.
+ * Memory is mapped as device and cause an error on bus and trap as an SError.
+ */
+test_result_t test_inject_serror(void)
+{
+ int rc;
+
+ rc = mmap_add_dynamic_region(TEST_ADDRESS, TEST_ADDRESS, PAGE_SIZE,
+ MT_DEVICE | MT_RW | MT_NS);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mapping address %lu(%d) failed\n",
+ __LINE__, TEST_ADDRESS, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Try writing to invalid address */
+ mmio_write_32(TEST_ADDRESS, 1);
+
+ rc = mmap_remove_dynamic_region(TEST_ADDRESS, PAGE_SIZE);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_remove_dynamic_region() = %d\n", __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/misc_tests/test_firmware_handoff.c b/tftf/tests/misc_tests/test_firmware_handoff.c
new file mode 100644
index 000000000..bd565aefa
--- /dev/null
+++ b/tftf/tests/misc_tests/test_firmware_handoff.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <transfer_list.h>
+
+extern u_register_t hw_config_base;
+extern u_register_t ns_tl;
+extern u_register_t tl_signature;
+
+#define DTB_PREAMBLE U(0xedfe0dd0)
+
+test_result_t test_handoff_header(void)
+{
+ struct transfer_list_header *tl = (struct transfer_list_header *)ns_tl;
+
+ assert((uint32_t)tl_signature ==
+ (REGISTER_CONVENTION_VERSION_MASK | TRANSFER_LIST_SIGNATURE));
+
+ if (transfer_list_check_header(tl) == TL_OPS_NON) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t test_handoff_dtb_payload(void)
+{
+ tftf_testcase_printf("Validating HW_CONFIG from transfer list.\n");
+ struct transfer_list_header *tl = (struct transfer_list_header *)ns_tl;
+ struct transfer_list_entry *te = (void *)tl + tl->hdr_size;
+ uintptr_t dtb_ptr;
+
+ te = transfer_list_find(tl, TL_TAG_FDT);
+
+ if (te == NULL) {
+ tftf_testcase_printf(
+ "Failed to find HW CONFIG TE in transfer list!");
+ return TEST_RESULT_FAIL;
+ }
+
+ dtb_ptr = (unsigned long)transfer_list_entry_data(te);
+
+ if ((dtb_ptr != hw_config_base) &&
+ (*(uint32_t *)dtb_ptr != DTB_PREAMBLE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/misc_tests/test_invalid_access.c b/tftf/tests/misc_tests/test_invalid_access.c
new file mode 100644
index 000000000..3baeed554
--- /dev/null
+++ b/tftf/tests/misc_tests/test_invalid_access.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "ffa_helpers.h"
+#include <plat/common/platform.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arch_features.h>
+#include <debug.h>
+#ifdef __aarch64__
+#include <spm_test_helpers.h>
+#include <sync.h>
+#endif
+#include <host_realm_helper.h>
+#include <lib/aarch64/arch_features.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_v2.h>
+#include <platform_def.h>
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+
+/*
+ * Using "__aarch64__" here looks weird but it's unavoidable for the following reason:
+ * This test is part of standard test which runs on all platforms but pre-requisite
+ * to run this test (custom sync exception handler) is only implemented for aarch64.
+ * TODO: Write a framework so that tests kept in standard list can be selectively
+ * run on a given architecture
+ */
+#ifdef __aarch64__
+
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+
+static volatile bool sync_exception_triggered;
+static volatile bool data_abort_triggered;
+static const struct ffa_uuid expected_sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
+};
+
+static __aligned(PAGE_SIZE) uint64_t share_page[PAGE_SIZE / sizeof(uint64_t)];
+
+static bool data_abort_handler(void)
+{
+ uint64_t esr_elx = IS_IN_EL2() ? read_esr_el2() : read_esr_el1();
+ unsigned int rme_supported = get_armv9_2_feat_rme_support();
+
+ sync_exception_triggered = true;
+
+ VERBOSE("%s esr_elx %llx\n", __func__, esr_elx);
+
+ if (EC_BITS(esr_elx) == EC_DABORT_CUR_EL) {
+ if (rme_supported == 0) {
+ /* Synchronous external data abort triggered by trustzone controller */
+ if ((ISS_BITS(esr_elx) & ISS_DFSC_MASK) == DFSC_EXT_DABORT) {
+ VERBOSE("%s TZC Data Abort caught\n", __func__);
+ data_abort_triggered = true;
+ return true;
+ }
+ } else {
+ /* Synchronous data abort triggered by Granule protection */
+ if ((ISS_BITS(esr_elx) & ISS_DFSC_MASK) == DFSC_GPF_DABORT) {
+ VERBOSE("%s GPF Data Abort caught\n", __func__);
+ data_abort_triggered = true;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+test_result_t el3_memory_cannot_be_accessed_in_ns(void)
+{
+ const uintptr_t test_address = EL3_MEMORY_ACCESS_ADDR;
+
+ VERBOSE("Attempt to access el3 memory (0x%lx)\n", test_address);
+
+ sync_exception_triggered = false;
+ data_abort_triggered = false;
+
+ int rc = mmap_add_dynamic_region(test_address, test_address, PAGE_SIZE,
+ MT_MEMORY | MT_RW | MT_NS);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_add_dynamic_region() = %d\n", __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ register_custom_sync_exception_handler(data_abort_handler);
+ *((volatile uint64_t *)test_address);
+ unregister_custom_sync_exception_handler();
+
+ rc = mmap_remove_dynamic_region(test_address, PAGE_SIZE);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_remove_dynamic_region() = %d\n", __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (sync_exception_triggered == false) {
+ tftf_testcase_printf("No sync exception while accessing (0x%lx)\n", test_address);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (data_abort_triggered == false) {
+ tftf_testcase_printf("Sync exception is not data abort\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * @Test_Aim@ Check a realm region cannot be accessed from normal world.
+ *
+ * This test delegates a TFTF allocated buffer to Realm. It then attempts
+ * a read access to the region from normal world. This results in the PE
+ * triggering a GPF caught by a custom synchronous abort handler.
+ *
+ */
+test_result_t rl_memory_cannot_be_accessed_in_ns(void)
+{
+ test_result_t result = TEST_RESULT_FAIL;
+ u_register_t retmm;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ sync_exception_triggered = false;
+ data_abort_triggered = false;
+ register_custom_sync_exception_handler(data_abort_handler);
+
+ /* First read access to the test region must not fail. */
+ *((volatile uint64_t *)share_page);
+
+ if ((sync_exception_triggered != false) ||
+ (data_abort_triggered != false)) {
+ goto out_unregister;
+ }
+
+ host_rmi_init_cmp_result();
+
+ /* Delegate the shared page to Realm. */
+ retmm = host_rmi_granule_delegate((u_register_t)&share_page);
+ if (retmm != 0UL) {
+ ERROR("%s() failed\n", "host_rmi_granule_delegate");
+ goto out_unregister;
+ }
+
+ /* This access shall trigger a GPF. */
+ *((volatile uint64_t *)share_page);
+
+ if ((sync_exception_triggered != true) ||
+ (data_abort_triggered != true)) {
+ goto out_undelegate;
+ }
+
+ result = host_cmp_result();
+
+out_undelegate:
+ /* Undelegate the shared page. */
+ retmm = host_rmi_granule_undelegate((u_register_t)&share_page);
+ if (retmm != 0UL) {
+ ERROR("Granule undelegate failed, ret=0x%lx\n", retmm);
+ }
+
+out_unregister:
+ unregister_custom_sync_exception_handler();
+
+ return result;
+}
+
+/**
+ * @Test_Aim@ Check a secure region cannot be accessed from normal world.
+ *
+ * Following test intends to run on RME enabled platforms when EL3
+ * is Root world. In a non RME platform, EL3 is secure.
+ * Access to secure memory from NS world is already covered
+ * by el3_memory_cannot_be_accessed_in_ns.
+ */
+test_result_t s_memory_cannot_be_accessed_in_ns(void)
+{
+ const uintptr_t test_address = SECURE_MEMORY_ACCESS_ADDR;
+
+	/* Skip non-RME platforms */
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ VERBOSE("Attempt to access secure memory (0x%lx)\n", test_address);
+
+ data_abort_triggered = false;
+ sync_exception_triggered = false;
+ register_custom_sync_exception_handler(data_abort_handler);
+ dsbsy();
+
+ int rc = mmap_add_dynamic_region(test_address, test_address, PAGE_SIZE,
+ MT_MEMORY | MT_RW | MT_NS);
+
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_add_dynamic_region() = %d\n", __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ *((volatile uint64_t *)test_address);
+
+ mmap_remove_dynamic_region(test_address, PAGE_SIZE);
+
+ dsbsy();
+ unregister_custom_sync_exception_handler();
+
+ if (sync_exception_triggered == false) {
+ tftf_testcase_printf("No sync exception while accessing (0x%lx)\n", test_address);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (data_abort_triggered == false) {
+ tftf_testcase_printf("Sync exception is not data abort\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t memory_cannot_be_accessed_in_rl(u_register_t params)
+{
+ u_register_t retrmm;
+ test_result_t result = TEST_RESULT_FAIL;
+ static char rd[GRANULE_SIZE] __aligned(GRANULE_SIZE);
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ retrmm = host_rmi_granule_delegate((u_register_t)&rd[0]);
+ if (retrmm != 0UL) {
+ ERROR("%s() failed, ret=0x%lx\n", "host_rmi_granule_delegate",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Create a realm using a parameter in a secure physical address space should fail. */
+ retrmm = host_rmi_realm_create((u_register_t)&rd[0], params);
+ if (retrmm == 0UL) {
+ ERROR("Realm create operation should fail, %lx\n", retrmm);
+ retrmm = host_rmi_realm_destroy((u_register_t)&rd[0]);
+ if (retrmm != 0UL) {
+ ERROR("Realm destroy operation returns fail, %lx\n", retrmm);
+ }
+ } else if (retrmm != RMI_ERROR_INPUT) {
+ ERROR("Realm create operation should fail with code:%d retrmm:%ld\n",
+ RMI_ERROR_INPUT, retrmm);
+ } else {
+ result = TEST_RESULT_SUCCESS;
+ }
+
+ retrmm = host_rmi_granule_undelegate((u_register_t)&rd[0]);
+ if (retrmm != 0UL) {
+ INFO("Undelegate operation returns 0x%lx\n", retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (result == TEST_RESULT_SUCCESS) {
+ return host_cmp_result();
+ }
+
+ return TEST_RESULT_FAIL;
+}
+
+/**
+ * @Test_Aim@ Check a root region cannot be accessed from a secure partition.
+ *
+ * A hardcoded address marked Root in the GPT is shared to a secure
+ * partition. The operation fails given the memory shared needs to be
+ * preconfigured in the memory ranges described in the SPMC manifest. The ranges
+ * related with S/NS memory that the SP can access shall never contain
+ * realm/root memory as this incurs into a configuration error.
+ * This test validates the SP can't get access to root memory via FF-A memory
+ * sharing interfaces.
+ */
+test_result_t rt_memory_cannot_be_accessed_in_s(void)
+{
+ const uintptr_t test_address = EL3_MEMORY_ACCESS_ADDR;
+ struct ffa_memory_region_constituent constituents[] = {
+ {
+ (void *)test_address, 1, 0
+ }
+ };
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+ ffa_memory_handle_t handle;
+ struct mailbox_buffers mb;
+ struct ffa_value ret;
+
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(
+ RECEIVER, FFA_MEM_SHARE_SMC32);
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ handle = memory_init_and_send(mb.send, PAGE_SIZE, SENDER, &receiver, 1,
+ constituents, constituents_count,
+ FFA_MEM_SHARE_SMC32, &ret);
+
+ if (handle == FFA_MEMORY_HANDLE_INVALID) {
+ return TEST_RESULT_SUCCESS;
+ }
+
+ return TEST_RESULT_FAIL;
+}
+
+test_result_t s_memory_cannot_be_accessed_in_rl(void)
+{
+ u_register_t params = (u_register_t)SECURE_MEMORY_ACCESS_ADDR;
+ return memory_cannot_be_accessed_in_rl(params);
+}
+
+test_result_t rt_memory_cannot_be_accessed_in_rl(void)
+{
+ u_register_t params = (u_register_t)EL3_MEMORY_ACCESS_ADDR;
+ return memory_cannot_be_accessed_in_rl(params);
+}
+
+#else
+
+test_result_t el3_memory_cannot_be_accessed_in_ns(void)
+{
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t rl_memory_cannot_be_accessed_in_ns(void)
+{
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t s_memory_cannot_be_accessed_in_ns(void)
+{
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t s_memory_cannot_be_accessed_in_rl(void)
+{
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t rt_memory_cannot_be_accessed_in_rl(void)
+{
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+#endif /* __aarch64__ */
diff --git a/tftf/tests/misc_tests/test_nop.c b/tftf/tests/misc_tests/test_nop.c
new file mode 100644
index 000000000..d3c4386ab
--- /dev/null
+++ b/tftf/tests/misc_tests/test_nop.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <power_management.h>
+#include <test_helpers.h>
+
+__attribute__((noinline))
+static void debug_hook_func(void)
+{
+ __asm__ volatile(
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "debug_hook:\n"
+ ".global debug_hook\n"
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ );
+
+ return;
+}
+
+static test_result_t secondary_cpu(void)
+{
+ debug_hook_func();
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * This is intended for use in conjunction with Trusted Firmware eXplorer
+ * (TFX).
+ *
+ * 1. Power up all secondary CPUs and execute test_nop.
+ * 2. TFX is expected to set a breakpoint on debug_hook. When this is hit,
+ * TFX takes over control and starts injecting test code.
+ * 3. Once the test is complete, TFX powers down all CPUs.
+ */
+test_result_t test_nop(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node;
+ long long ret;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /* Start all other CPUs */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ ret = tftf_cpu_on(target_mpid, (uintptr_t)secondary_cpu, 0);
+ if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n", (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Do the actual work */
+ debug_hook_func();
+
+ /* Wait for other CPUs to complete */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) != PSCI_STATE_OFF) {
+ continue;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/misc_tests/test_ras_ffh_nested.c b/tftf/tests/misc_tests/test_ras_ffh_nested.c
new file mode 100644
index 000000000..99c71b750
--- /dev/null
+++ b/tftf/tests/misc_tests/test_ras_ffh_nested.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <psci.h>
+#include <sdei.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+static volatile uint64_t sdei_event_received;
+extern void inject_unrecoverable_ras_error(void);
+extern int serror_sdei_event_handler(int ev, uint64_t arg);
+
+int sdei_handler(int ev, uint64_t arg)
+{
+ sdei_event_received = 1;
+ tftf_testcase_printf("SError SDEI event received.\n");
+ return 0;
+}
+
+/*
+ * Test to verify nested exception handling of SErrors in EL3.
+ *
+ * This test exercises the path of EL3 nested exception handling of SErrors
+ * during SMC exception handling. In SMC exception handling vector path
+ * during synchronization of errors, a pending async EA is detected which
+ * gets handled in EL3 (as it is in FFH mode) as a nested exception. Original
+ * SMC call is handled after async EA is handled.
+ *
+ * This test works in conjunction with "ras_ffh_nested.patch"
+ * present in CI repository.
+ *
+ * Test steps:
+ * 1. TF-A is build for Firmware first handling for RAS errors.
+ * 2. Register/enable SDEI event notification for RAS error.
+ * 3. Make an SMC call to get the SMCCC version which will be used for
+ * comparing later on, along with that it also changes SCR_EL3.EA=0 to
+ * route SError to TFTF. This allow SError to be pended when next SMC
+ * call is made.
+ * 4. Disable SError (PSTATE.A = 1)
+ * 5. Inject RAS error and give time for it to trigger.
+ * 6. At this point SError is pended (ISR_EL1 = 0x100)
+ * 7. Make SMC call to get the version
+ * 8. On entering EL3, sync_exception_vector entry, will find that SError is
+ * pending.
+ * 9. Based on FFH routing model EL3 will call "handle_pending_async_ea" to
+ * handle nested exception SError first.
+ * 10.RAS error will be handled by platform handler and be notified to TFTF
+ * through SDEI handler.
+ * 11.Once the control returns back to vector entry of SMC, EL3 will continue
+ * with original SMC request.
+ *
+ * Checks:
+ * 1. Ensure that we did receive SDEI notification
+ * 2. Ensure that second SMC request was successful.
+ *
+ */
+test_result_t test_ras_ffh_nested(void)
+{
+ int64_t ret;
+ const int event_id = 5000;
+ smc_args args;
+ smc_ret_values smc_ret;
+ u_register_t expected_ver;
+
+ /* Register SDEI handler */
+ ret = sdei_event_register(event_id, serror_sdei_event_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+ ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(event_id);
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_pe_unmask();
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Get the version to compare against */
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+ smc_ret = tftf_smc(&args);
+ expected_ver = smc_ret.ret0;
+ smc_ret.ret0 = 0;
+
+ disable_serror();
+
+ inject_unrecoverable_ras_error();
+
+ waitms(50);
+
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+
+ /* Ensure that we are testing reflection path, SMC before SError */
+ if (sdei_event_received == true) {
+ tftf_testcase_printf("SError was triggered before SMC\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ smc_ret = tftf_smc(&args);
+
+ tftf_testcase_printf("SMCCC Version = %d.%d\n",
+ (int)((smc_ret.ret0 >> SMCCC_VERSION_MAJOR_SHIFT) & SMCCC_VERSION_MAJOR_MASK),
+ (int)((smc_ret.ret0 >> SMCCC_VERSION_MINOR_SHIFT) & SMCCC_VERSION_MINOR_MASK));
+
+ if ((int32_t)smc_ret.ret0 != expected_ver) {
+		tftf_testcase_printf("Unexpected SMCCC version: 0x%x\n", (int)smc_ret.ret0);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (sdei_event_received == false) {
+ tftf_testcase_printf("SError is not triggered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+#else
+test_result_t test_ras_ffh_nested(void)
+{
+ tftf_testcase_printf("Not supported on AArch32.\n");
+ return TEST_RESULT_SKIPPED;
+}
+#endif
diff --git a/tftf/tests/misc_tests/test_ras_kfh.c b/tftf/tests/misc_tests/test_ras_kfh.c
new file mode 100644
index 000000000..b38d6c42c
--- /dev/null
+++ b/tftf/tests/misc_tests/test_ras_kfh.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <serror.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+static volatile uint64_t serror_triggered;
+extern void inject_unrecoverable_ras_error(void);
+
+static bool serror_handler(void)
+{
+ serror_triggered = 1;
+ return true;
+}
+
+/*
+ * Test Kernel First handling paradigm of RAS errors.
+ *
+ * Register a custom serror handler in tftf, inject a RAS error and wait
+ * for finite time to ensure that SError triggered and handled.
+ */
+test_result_t test_ras_kfh(void)
+{
+ register_custom_serror_handler(serror_handler);
+ inject_unrecoverable_ras_error();
+
+ /* Give reasonable time for SError to be triggered/handled */
+ waitms(500);
+
+ unregister_custom_serror_handler();
+
+ if (serror_triggered == false) {
+ tftf_testcase_printf("SError is not triggered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+#else
+test_result_t test_ras_kfh(void)
+{
+ tftf_testcase_printf("Not supported on AArch32.\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+#endif
diff --git a/tftf/tests/misc_tests/test_ras_kfh_reflect.c b/tftf/tests/misc_tests/test_ras_kfh_reflect.c
new file mode 100644
index 000000000..d24fc4792
--- /dev/null
+++ b/tftf/tests/misc_tests/test_ras_kfh_reflect.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <drivers/arm/arm_gic.h>
+#include <irq.h>
+#include <platform.h>
+#include <psci.h>
+#include <serror.h>
+#include <sgi.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+static volatile uint64_t serror_triggered;
+static volatile uint64_t irq_triggered;
+static u_register_t expected_ver;
+extern void inject_unrecoverable_ras_error(void);
+
+/*
+ * Tests to verify reflection of lower EL SErrors in RAS KFH mode.
+ *
+ * These tests exercises the path of EL3 reflection of SError back to lower
+ * EL, which gets triggered as part of error synchronization during EL3
+ * entry. This test works in conjunction with "ras_kfh_reflection.patch"
+ * present in CI repository.
+ *
+ * One test each to verify reflection in sync and async exception.
+ *
+ */
+static bool serror_handler(void)
+{
+ serror_triggered = 1;
+ tftf_testcase_printf("SError event received.\n");
+ return true;
+}
+
+static int irq_handler(void *data)
+{
+ irq_triggered = 1;
+ tftf_testcase_printf("IRQ received.\n");
+ return true;
+}
+
+/*
+ * Test Steps:
+ * 1. Register a custom SError handler for tftf
+ * 2. Make an SMC call to get the SMCCC version which will be used for
+ * comparing later on, along with that it also changes SCR_EL3.I = 1
+ * to route IRQ to EL3.
+ * 3. Disable SError (PSTATE.A = 1)
+ * 4. Inject RAS error and give time for it to trigger.
+ * 5. Register an SGI handler and inject SGI.
+ * 6. Because the IRQ is targeted to EL3 it will trap in EL3 irq_vector_entry
+ * 7. On entering EL3 it will find that SError is pending, So it will call
+ * "reflect_pending_serror_to_lower_el" and eret.
+ * 8. TF-A will eret back from EL3(without handling IRQ) and during ERET
+ * change SCR_EL3.I back to 0 along with unmasking SError for TFTF.
+ * SPSR.PSTATE.A = 0.
+ * 9. At tftf entry it will see both IRQ and SError pending, so it can take
+ * either of exception first (based on priority of SError/IRQ). The fvp model
+ * on which it was tested, IRQ is taken first.
+ * 10.First IRQ handler will be called and then SError handler will called.
+ *
+ */
+test_result_t test_ras_kfh_reflect_irq(void)
+{
+ smc_args args;
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ const unsigned int sgi_id = IRQ_NS_SGI_0;
+ smc_ret_values smc_ret;
+ int ret;
+
+ /* Get the SMCCC version to compare against */
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+ smc_ret = tftf_smc(&args);
+ expected_ver = smc_ret.ret0;
+
+ register_custom_serror_handler(serror_handler);
+ disable_serror();
+ inject_unrecoverable_ras_error();
+
+ waitms(50);
+
+ ret = tftf_irq_register_handler(sgi_id, irq_handler);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to register initial IRQ handler\n");
+ return TEST_RESULT_FAIL;
+ }
+ tftf_irq_enable(sgi_id, GIC_HIGHEST_NS_PRIORITY);
+ tftf_send_sgi(sgi_id, core_pos);
+
+ if ((serror_triggered == false) || (irq_triggered == false)) {
+ tftf_testcase_printf("SError or IRQ is not triggered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = tftf_irq_unregister_handler(sgi_id);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to unregister IRQ handler\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ unregister_custom_serror_handler();
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test Steps:
+ * 1. Register a custom SError handler for tftf
+ * 3. Disable SError (PSTATE.A = 1)
+ * 4. Inject RAS error and give time for it to trigger.
+ * 5. Ensure SError is not triggered before making SMC call.
+ * 7. On entering EL3 it will find that SError is pending, So it will call
+ * "reflect_pending_serror_to_lower_el" and eret.
+ * 8. TF-A will eret back from EL3(without handling SMC) and during ERET
+ * unmask SError for TFTF (SPSR.PSTATE.A = 0).
+ * 9. At TFTF entry it will see an SError pending which will cause registered
+ * SError handler to be called.
+ * 10.After returning back from EL3 the original SMC request will be handled.
+ */
+test_result_t test_ras_kfh_reflect_sync(void)
+{
+ smc_args args;
+ smc_ret_values ret;
+
+ serror_triggered = 0;
+
+ register_custom_serror_handler(serror_handler);
+ disable_serror();
+ inject_unrecoverable_ras_error();
+
+ waitms(50);
+
+ /* Ensure that we are testing reflection path, SMC before SError */
+ if (serror_triggered == true) {
+ tftf_testcase_printf("SError was triggered before SMC\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+ ret = tftf_smc(&args);
+ tftf_testcase_printf("SMCCC Version = %d.%d\n",
+ (int)((ret.ret0 >> SMCCC_VERSION_MAJOR_SHIFT) & SMCCC_VERSION_MAJOR_MASK),
+ (int)((ret.ret0 >> SMCCC_VERSION_MINOR_SHIFT) & SMCCC_VERSION_MINOR_MASK));
+
+ if ((int32_t)ret.ret0 != expected_ver) {
+ tftf_testcase_printf("Unexpected SMCCC version: 0x%x\n", (int)ret.ret0);
+ return TEST_RESULT_FAIL;
+ }
+
+ unregister_custom_serror_handler();
+
+ if (serror_triggered == false) {
+ tftf_testcase_printf("SError is not triggered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+#else
+test_result_t test_ras_kfh_reflect_irq(void)
+{
+ tftf_testcase_printf("Not supported on AArch32.\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t test_ras_kfh_reflect_sync(void)
+{
+ tftf_testcase_printf("Not supported on AArch32.\n");
+ return TEST_RESULT_SKIPPED;
+}
+#endif
diff --git a/tftf/tests/misc_tests/test_single_fault.c b/tftf/tests/misc_tests/test_single_fault.c
index f55d8de8d..cacd0a766 100644
--- a/tftf/tests/misc_tests/test_single_fault.c
+++ b/tftf/tests/misc_tests/test_single_fault.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,20 +11,18 @@
#ifdef __aarch64__
-uint64_t serror_received;
-
-extern void inject_serror(void);
+static volatile uint64_t sdei_event_received;
+extern void inject_unrecoverable_ras_error(void);
+extern int serror_sdei_event_handler(int ev, uint64_t arg);
-int serror_handler(int ev, uint64_t arg)
+int sdei_handler(int ev, uint64_t arg)
{
- serror_received = 1;
+ sdei_event_received = 1;
tftf_testcase_printf("SError SDEI event received.\n");
return 0;
}
-extern int serror_sdei_event_handler(int ev, uint64_t arg);
-
test_result_t test_single_fault(void)
{
int64_t ret;
@@ -51,7 +49,12 @@ test_result_t test_single_fault(void)
return TEST_RESULT_FAIL;
}
- inject_serror();
+ inject_unrecoverable_ras_error();
+
+ /* Wait until the SError fires */
+ do {
+ dmbish();
+ } while (sdei_event_received == 0);
return TEST_RESULT_SUCCESS;
}
diff --git a/tftf/tests/misc_tests/test_uncontainable.c b/tftf/tests/misc_tests/test_uncontainable.c
index 231e5e8cf..5250f0a33 100644
--- a/tftf/tests/misc_tests/test_uncontainable.c
+++ b/tftf/tests/misc_tests/test_uncontainable.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,11 +8,11 @@
#ifdef __aarch64__
-extern void inject_uncontainable(void);
+extern void inject_uncontainable_ras_error(void);
test_result_t test_uncontainable(void)
{
- inject_uncontainable();
+ inject_uncontainable_ras_error();
return TEST_RESULT_SUCCESS;
}
diff --git a/tftf/tests/misc_tests/test_undef_injection.c b/tftf/tests/misc_tests/test_undef_injection.c
new file mode 100644
index 000000000..2d925a213
--- /dev/null
+++ b/tftf/tests/misc_tests/test_undef_injection.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <assert.h>
+#include <debug.h>
+#include <smccc.h>
+#include <sync.h>
+#include <tftf_lib.h>
+#include <platform_def.h>
+
+static volatile bool undef_injection_triggered;
+
+static bool undef_injection_handler(void)
+{
+ uint64_t esr_el2 = read_esr_el2();
+ if (EC_BITS(esr_el2) == EC_UNKNOWN) {
+ VERBOSE("UNDEF injection from EL3\n");
+ undef_injection_triggered = true;
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Test to verify UNDEF injection support in TF-A
+ *
+ * This test tries to access FGT EL2 registers which traps to EL3 and then
+ * the error is injected back from EL3 to TFTF to ensure that injection
+ * logic in TF-A is working, it also ensures that EL3 is still functional
+ * after UNDEF injection.
+ *
+ * To trap FGT register access to EL3, we run this test on a model with
+ * FEAT_FGT present but the traps from EL3 are not disabled by setting
+ * ENABLE_FEAT_FGT = 0
+ */
+test_result_t test_undef_injection(void)
+{
+ undef_injection_triggered = false;
+
+ register_custom_sync_exception_handler(undef_injection_handler);
+
+ /* Try to access a register which traps to EL3 */
+ read_hfgitr_el2();
+
+ unregister_custom_sync_exception_handler();
+
+ /* Ensure that EL3 still functional */
+ smc_args args;
+ smc_ret_values smc_ret;
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+ smc_ret = tftf_smc(&args);
+
+ tftf_testcase_printf("SMCCC Version = %d.%d\n",
+ (int)((smc_ret.ret0 >> SMCCC_VERSION_MAJOR_SHIFT) & SMCCC_VERSION_MAJOR_MASK),
+ (int)((smc_ret.ret0 >> SMCCC_VERSION_MINOR_SHIFT) & SMCCC_VERSION_MINOR_MASK));
+
+ if (undef_injection_triggered == false) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/performance_tests/test_psci_latencies.c b/tftf/tests/performance_tests/test_psci_latencies.c
index 8a7a1d032..b20fe887b 100644
--- a/tftf/tests/performance_tests/test_psci_latencies.c
+++ b/tftf/tests/performance_tests/test_psci_latencies.c
@@ -34,27 +34,6 @@ static event_t target_booted, target_keep_on_booted, target_keep_on;
*/
#define BASELINE_VARIANCE 10
-/*
- * Utility function to wait for all CPUs other than the caller to be
- * OFF.
- */
-static void wait_for_non_lead_cpus(void)
-{
- unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int target_mpid, target_node;
-
- for_each_cpu(target_node) {
- target_mpid = tftf_get_mpidr_from_node(target_node);
- /* Skip lead CPU, as it is powered on */
- if (target_mpid == lead_mpid)
- continue;
-
- while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0)
- != PSCI_STATE_OFF)
- ;
- }
-}
-
static test_result_t test_target_function(void)
{
tftf_send_event(&target_booted);
diff --git a/tftf/tests/plat/xilinx/common/plat_pm.c b/tftf/tests/plat/xilinx/common/plat_pm.c
new file mode 100644
index 000000000..7f43824fe
--- /dev/null
+++ b/tftf/tests/plat/xilinx/common/plat_pm.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+#include <platform_def.h>
+
+/* Number of 32bits values in payload */
+#define PAYLOAD_ARG_CNT 4U
+
+#define upper_32_bits(n) ((uint32_t)(((n) >> 32)))
+#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))
+
+
+#define PM_GET_API_VERSION 0xC2000001
+#define PM_GET_CHIPID 0xC2000018
+
+
+/*
+ * @Test_Aim@ Test to read the PM-API version from AMD-Xilinx platform
+ * This test run on lead CPU and issues PM_GET_API_VERSION SMC call to read the
+ * supported PM-API version on the platform.
+ * Return values are packed as
+ * ret.ret0(31:0) : actual return value
+ * ret.ret0(63:32) : Return arg1
+ * ret.ret1(31:0) : Return arg2
+ * ret.ret1(63:32) : Return arg3 and so on.
+ */
+test_result_t test_pmapi_version(void)
+{
+ smc_args args = { PM_GET_API_VERSION };
+ smc_ret_values ret;
+ uint32_t major, minor, status;
+
+ ret = tftf_smc(&args);
+ status = lower_32_bits(ret.ret0);
+ if (status) {
+ tftf_testcase_printf("%s ERROR Reading PM-API Version\n",
+ __func__);
+ return TEST_RESULT_FAIL;
+ }
+
+ major = upper_32_bits(ret.ret0) >> 16;
+ minor = upper_32_bits(ret.ret0) & 0xFFFF;
+
+ tftf_testcase_printf("%s PM-API Version : %d.%d\n", __func__,
+ major, minor);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test to read the Chip ID of AMD-Xilinx platforms.
+ * This test runs on Lead CPU and issues PM_GET_CHIPID SMC call to read ChipID
+ * The IDcode and version is printed
+ * Return values are packed as
+ * ret.ret0(31:0) : actual return value
+ * ret.ret0(63:32) : Return arg1
+ * ret.ret1(31:0) : Return arg2
+ * ret.ret1(63:32) : Return arg3 and so on.
+ */
+test_result_t test_get_chipid(void)
+{
+ smc_args args = { PM_GET_CHIPID };
+ smc_ret_values ret;
+ uint32_t idcode, version, status;
+
+ ret = tftf_smc(&args);
+ status = lower_32_bits(ret.ret0);
+ if (status) {
+ tftf_testcase_printf("%s ERROR Reading Chip ID\n", __func__);
+ return TEST_RESULT_FAIL;
+ }
+
+ idcode = upper_32_bits(ret.ret0);
+ version = lower_32_bits(ret.ret1);
+
+ tftf_testcase_printf("%s Idcode = 0x%x Version = 0x%x\n", __func__,
+ idcode, version);
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_3.c b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_3.c
new file mode 100644
index 000000000..ebf40a531
--- /dev/null
+++ b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_3.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smccc.h>
+#include <string.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+#define CORTEX_A57_MIDR 0x410FD070
+#define CORTEX_A72_MIDR 0x410FD080
+#define CORTEX_A73_MIDR 0x410FD090
+#define CORTEX_A75_MIDR 0x410FD0A0
+
+static int cortex_a57_test(void);
+static int cortex_a73_test(void);
+static int cortex_a75_test(void);
+static int csv2_test(void);
+
+static struct ent {
+ unsigned int midr;
+ int (*wa_required)(void);
+} entries[] = {
+ { .midr = CORTEX_A57_MIDR, .wa_required = cortex_a57_test },
+ { .midr = CORTEX_A72_MIDR, .wa_required = csv2_test },
+ { .midr = CORTEX_A73_MIDR, .wa_required = cortex_a73_test },
+ { .midr = CORTEX_A75_MIDR, .wa_required = cortex_a75_test }
+};
+
+static int cortex_a57_test(void)
+{
+ return 1;
+}
+
+static int cortex_a73_test(void)
+{
+ return 1;
+}
+
+static int cortex_a75_test(void)
+{
+ return 1;
+}
+
+static int csv2_test(void)
+{
+ uint64_t pfr0;
+
+ pfr0 = read_id_aa64pfr0_el1() >> ID_AA64PFR0_CSV2_SHIFT;
+ if ((pfr0 & ID_AA64PFR0_CSV2_MASK) == 1) {
+ return 0;
+ }
+ return 1;
+}
+
+static test_result_t test_smccc_entrypoint(void)
+{
+ smc_args args;
+ smc_ret_values ret;
+ int32_t expected_ver;
+ unsigned int my_midr, midr_mask;
+ int wa_required;
+ size_t i;
+
+ /* Check if SMCCC version is at least v1.1 */
+ expected_ver = MAKE_SMCCC_VERSION(1, 1);
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+ ret = tftf_smc(&args);
+ if ((int32_t)ret.ret0 < expected_ver) {
+ tftf_testcase_printf("Unexpected SMCCC version: 0x%x\n",
+ (int)ret.ret0);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Check if SMCCC_ARCH_WORKAROUND_3 is required or not */
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_ARCH_FEATURES;
+ args.arg1 = SMCCC_ARCH_WORKAROUND_3;
+ ret = tftf_smc(&args);
+ if ((int)ret.ret0 == -1) {
+ tftf_testcase_printf("SMCCC_ARCH_WORKAROUND_3 is not implemented\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* If the call returns 0, it means the workaround is required */
+ if ((int)ret.ret0 == 0) {
+ wa_required = 1;
+ } else {
+ wa_required = 0;
+ }
+
+ /* Check if the SMC return value matches our expectations */
+ my_midr = (unsigned int)read_midr_el1();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ for (i = 0; i < ARRAY_SIZE(entries); i++) {
+ struct ent *entp = &entries[i];
+
+ if ((my_midr & midr_mask) == (entp->midr & midr_mask)) {
+ if (entp->wa_required() != wa_required) {
+ return TEST_RESULT_FAIL;
+ }
+ break;
+ }
+ }
+ if ((i == ARRAY_SIZE(entries)) && wa_required) {
+ tftf_testcase_printf("TFTF workaround table out of sync with TF-A\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Invoke the workaround to make sure nothing nasty happens */
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_ARCH_WORKAROUND_3;
+ tftf_smc(&args);
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t test_smccc_arch_workaround_3(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, ret;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /* Power on all the non-lead cores. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)test_smccc_entrypoint, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ /*
+ * Wait for test_smccc_entrypoint to return
+ * and the CPU to power down
+ */
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF) {
+ continue;
+ }
+ }
+
+ return test_smccc_entrypoint();
+}
+#else
+test_result_t test_smccc_arch_workaround_3(void)
+{
+ INFO("%s skipped on AArch32\n", __func__);
+ return TEST_RESULT_SKIPPED;
+}
+#endif
diff --git a/tftf/tests/runtime_services/host_realm_managment/host_pmuv3.c b/tftf/tests/runtime_services/host_realm_managment/host_pmuv3.c
new file mode 100644
index 000000000..042132ed8
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/host_pmuv3.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+#include <stdlib.h>
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <test_helpers.h>
+
+#include <host_realm_helper.h>
+#include <host_realm_pmu.h>
+#include <platform.h>
+
+#define MAX_COUNTERS 31
+
+/* PMCCFILTR_EL0 mask */
+#define PMCCFILTR_EL0_MASK ( \
+ PMCCFILTR_EL0_P_BIT | \
+ PMCCFILTR_EL0_U_BIT | \
+ PMCCFILTR_EL0_NSK_BIT | \
+ PMCCFILTR_EL0_NSH_BIT | \
+ PMCCFILTR_EL0_M_BIT | \
+ PMCCFILTR_EL0_RLK_BIT | \
+ PMCCFILTR_EL0_RLU_BIT | \
+ PMCCFILTR_EL0_RLH_BIT)
+
+/* PMEVTYPER<n>_EL0 mask */
+#define PMEVTYPER_EL0_MASK ( \
+ PMEVTYPER_EL0_P_BIT | \
+ PMEVTYPER_EL0_U_BIT | \
+ PMEVTYPER_EL0_NSK_BIT | \
+ PMEVTYPER_EL0_NSU_BIT | \
+ PMEVTYPER_EL0_NSH_BIT | \
+ PMEVTYPER_EL0_M_BIT | \
+ PMEVTYPER_EL0_RLK_BIT | \
+ PMEVTYPER_EL0_RLU_BIT | \
+ PMEVTYPER_EL0_RLH_BIT | \
+ PMEVTYPER_EL0_EVTCOUNT_BITS)
+
+/* PMSELR_EL0 mask */
+#define PMSELR_EL0_MASK 0x1F
+
+#define WRITE_PMEV_REGS(n) { \
+ case n: \
+ pmu_ptr->pmevcntr_el0[n] = rand64(); \
+ write_pmevcntrn_el0(n, pmu_ptr->pmevcntr_el0[n]); \
+ pmu_ptr->pmevtyper_el0[n] = rand() & PMEVTYPER_EL0_MASK;\
+ write_pmevtypern_el0(n, pmu_ptr->pmevtyper_el0[n]); \
+}
+
+#define CHECK_PMEV_REG(n, reg) { \
+ read_val = read_##reg##n_el0(n); \
+ if (read_val != pmu_ptr->reg##_el0[n]) { \
+ ERROR("Corrupted "#reg"%d_el0=0x%lx (0x%lx)\n", \
+ n, read_val, pmu_ptr->reg##_el0[n]); \
+ return false; \
+ } \
+}
+
+#define CHECK_PMEV_REGS(n) { \
+ case n: \
+ CHECK_PMEV_REG(n, pmevcntr); \
+ CHECK_PMEV_REG(n, pmevtyper); \
+}
+
+#define WRITE_PMREG(reg, mask) { \
+ pmu_ptr->reg = rand64() & mask; \
+ write_##reg(pmu_ptr->reg); \
+}
+
+#define CHECK_PMREG(reg) { \
+ read_val = read_##reg(); \
+ val = pmu_ptr->reg; \
+ if (read_val != val) { \
+ ERROR("Corrupted "#reg"=0x%lx (0x%lx)\n", \
+ read_val, val); \
+ return false; \
+ } \
+}
+
+struct pmu_registers {
+ unsigned long pmcr_el0;
+ unsigned long pmcntenset_el0;
+ unsigned long pmovsset_el0;
+ unsigned long pmintenset_el1;
+ unsigned long pmccntr_el0;
+ unsigned long pmccfiltr_el0;
+ unsigned long pmuserenr_el0;
+
+ unsigned long pmevcntr_el0[MAX_COUNTERS];
+ unsigned long pmevtyper_el0[MAX_COUNTERS];
+
+ unsigned long pmselr_el0;
+ unsigned long pmxevcntr_el0;
+ unsigned long pmxevtyper_el0;
+
+} __aligned(CACHE_WRITEBACK_GRANULE);
+
+static struct pmu_registers pmu_state[PLATFORM_CORE_COUNT];
+
+void host_set_pmu_state(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ struct pmu_registers *pmu_ptr = &pmu_state[core_pos];
+ unsigned int num_cnts = GET_CNT_NUM;
+ unsigned long val;
+
+ val = read_pmcr_el0() | PMCR_EL0_DP_BIT;
+ pmu_ptr->pmcr_el0 = val;
+
+ /* Disable cycle counting and reset all counters */
+ write_pmcr_el0(val | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);
+
+ /* Disable all counters */
+ pmu_ptr->pmcntenset_el0 = 0UL;
+ write_pmcntenclr_el0(PMU_CLEAR_ALL);
+
+ /* Clear overflow status */
+ pmu_ptr->pmovsset_el0 = 0UL;
+ write_pmovsclr_el0(PMU_CLEAR_ALL);
+
+ /* Disable overflow interrupts on all counters */
+ pmu_ptr->pmintenset_el1 = 0UL;
+ write_pmintenclr_el1(PMU_CLEAR_ALL);
+
+ WRITE_PMREG(pmccntr_el0, UINT64_MAX);
+ WRITE_PMREG(pmccfiltr_el0, PMCCFILTR_EL0_MASK);
+
+ pmu_ptr->pmuserenr_el0 = read_pmuserenr_el0();
+
+ if (num_cnts != 0U) {
+ switch (--num_cnts) {
+ WRITE_PMEV_REGS(30);
+ WRITE_PMEV_REGS(29);
+ WRITE_PMEV_REGS(28);
+ WRITE_PMEV_REGS(27);
+ WRITE_PMEV_REGS(26);
+ WRITE_PMEV_REGS(25);
+ WRITE_PMEV_REGS(24);
+ WRITE_PMEV_REGS(23);
+ WRITE_PMEV_REGS(22);
+ WRITE_PMEV_REGS(21);
+ WRITE_PMEV_REGS(20);
+ WRITE_PMEV_REGS(19);
+ WRITE_PMEV_REGS(18);
+ WRITE_PMEV_REGS(17);
+ WRITE_PMEV_REGS(16);
+ WRITE_PMEV_REGS(15);
+ WRITE_PMEV_REGS(14);
+ WRITE_PMEV_REGS(13);
+ WRITE_PMEV_REGS(12);
+ WRITE_PMEV_REGS(11);
+ WRITE_PMEV_REGS(10);
+ WRITE_PMEV_REGS(9);
+ WRITE_PMEV_REGS(8);
+ WRITE_PMEV_REGS(7);
+ WRITE_PMEV_REGS(6);
+ WRITE_PMEV_REGS(5);
+ WRITE_PMEV_REGS(4);
+ WRITE_PMEV_REGS(3);
+ WRITE_PMEV_REGS(2);
+ WRITE_PMEV_REGS(1);
+ default:
+ WRITE_PMEV_REGS(0);
+ }
+
+ /* Generate a random number between 0 and num_cnts */
+ val = rand() % ++num_cnts;
+ } else {
+ val = 0UL;
+ }
+
+ pmu_ptr->pmselr_el0 = val;
+ write_pmselr_el0(val);
+
+ pmu_ptr->pmxevcntr_el0 = read_pmxevcntr_el0();
+ pmu_ptr->pmxevtyper_el0 = read_pmxevtyper_el0();
+}
+
+bool host_check_pmu_state(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ struct pmu_registers *pmu_ptr = &pmu_state[core_pos];
+ unsigned int num_cnts = GET_CNT_NUM;
+ unsigned long val, read_val;
+
+ CHECK_PMREG(pmcr_el0);
+ CHECK_PMREG(pmcntenset_el0);
+ CHECK_PMREG(pmovsset_el0);
+ CHECK_PMREG(pmintenset_el1);
+ CHECK_PMREG(pmccntr_el0);
+ CHECK_PMREG(pmccfiltr_el0);
+ CHECK_PMREG(pmuserenr_el0);
+ CHECK_PMREG(pmselr_el0);
+ CHECK_PMREG(pmxevcntr_el0);
+ CHECK_PMREG(pmxevtyper_el0);
+
+ if (num_cnts != 0UL) {
+ switch (--num_cnts) {
+ CHECK_PMEV_REGS(30);
+ CHECK_PMEV_REGS(29);
+ CHECK_PMEV_REGS(28);
+ CHECK_PMEV_REGS(27);
+ CHECK_PMEV_REGS(26);
+ CHECK_PMEV_REGS(25);
+ CHECK_PMEV_REGS(24);
+ CHECK_PMEV_REGS(23);
+ CHECK_PMEV_REGS(22);
+ CHECK_PMEV_REGS(21);
+ CHECK_PMEV_REGS(20);
+ CHECK_PMEV_REGS(19);
+ CHECK_PMEV_REGS(18);
+ CHECK_PMEV_REGS(17);
+ CHECK_PMEV_REGS(16);
+ CHECK_PMEV_REGS(15);
+ CHECK_PMEV_REGS(14);
+ CHECK_PMEV_REGS(13);
+ CHECK_PMEV_REGS(12);
+ CHECK_PMEV_REGS(11);
+ CHECK_PMEV_REGS(10);
+ CHECK_PMEV_REGS(9);
+ CHECK_PMEV_REGS(8);
+ CHECK_PMEV_REGS(7);
+ CHECK_PMEV_REGS(6);
+ CHECK_PMEV_REGS(5);
+ CHECK_PMEV_REGS(4);
+ CHECK_PMEV_REGS(3);
+ CHECK_PMEV_REGS(2);
+ CHECK_PMEV_REGS(1);
+ default:
+ CHECK_PMEV_REGS(0);
+ }
+ }
+
+ return true;
+}
diff --git a/tftf/tests/runtime_services/host_realm_managment/host_realm_helper.c b/tftf/tests/runtime_services/host_realm_managment/host_realm_helper.c
new file mode 100644
index 000000000..682a699da
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/host_realm_helper.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <heap/page_alloc.h>
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_rmi.h>
+#include <host_shared_data.h>
+#include <platform.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <realm_def.h>
+#include <sgi.h>
+#include <test_helpers.h>
+#include <xlat_tables_v2.h>
+
+#define RMI_EXIT(id) \
+ [RMI_EXIT_##id] = #id
+
+/*
+ * Printable names for the RmiRecExitReason encodings, indexed by the
+ * RMI_EXIT_* value itself via designated initializers.
+ */
+const char *rmi_exit[] = {
+	RMI_EXIT(SYNC),
+	RMI_EXIT(IRQ),
+	RMI_EXIT(FIQ),
+	RMI_EXIT(PSCI),
+	RMI_EXIT(RIPAS_CHANGE),
+	RMI_EXIT(HOST_CALL),
+	RMI_EXIT(SERROR)
+};
+
+/*
+ * The function handler to print the Realm logged buffer,
+ * executed by the secondary core
+ */
+void realm_print_handler(struct realm *realm_ptr, unsigned int rec_num)
+{
+ size_t str_len = 0UL;
+ host_shared_data_t *host_shared_data;
+ char *log_buffer;
+
+ assert(realm_ptr != NULL);
+ host_shared_data = host_get_shared_structure(realm_ptr, rec_num);
+ log_buffer = (char *)host_shared_data->log_buffer;
+ str_len = strlen((const char *)log_buffer);
+
+ /*
+ * Read Realm message from shared printf location and print
+ * them using UART
+ */
+ if (str_len != 0UL) {
+ /* Avoid memory overflow */
+ log_buffer[MAX_BUF_SIZE - 1] = 0U;
+ mp_printf("[VMID %u][Rec %u]: %s", realm_ptr->vmid, rec_num, log_buffer);
+ (void)memset((char *)log_buffer, 0, MAX_BUF_SIZE);
+ }
+}
+
+/*
+ * Initialisation function which clears the shared regions used to
+ * exchange logging and command data between the Host and each of
+ * the Realm's RECs.
+ */
+static void host_init_realm_print_buffer(struct realm *realm_ptr)
+{
+ host_shared_data_t *host_shared_data;
+
+ for (unsigned int i = 0U; i < realm_ptr->rec_count; i++) {
+ host_shared_data = host_get_shared_structure(realm_ptr, i);
+ (void)memset((char *)host_shared_data, 0, sizeof(host_shared_data_t));
+ }
+}
+
+static bool host_enter_realm(struct realm *realm_ptr,
+ u_register_t *exit_reason,
+ unsigned int *host_call_result,
+ unsigned int rec_num)
+{
+ u_register_t ret;
+
+ if (!realm_ptr->payload_created) {
+ ERROR("%s() failed\n", "payload_created");
+ return false;
+ }
+ if (!realm_ptr->shared_mem_created) {
+ ERROR("%s() failed\n", "shared_mem_created");
+ return false;
+ }
+
+ /* Enter Realm */
+ ret = host_realm_rec_enter(realm_ptr, exit_reason, host_call_result, rec_num);
+ if (ret != REALM_SUCCESS) {
+ ERROR("%s() failed, ret=%lx\n", "host_realm_rec_enter", ret);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Set up (but do not activate) a Realm: validate the memory layout, read and
+ * adjust the RMM feature register, create the Realm, map its payload image
+ * and create its RECs.  Returns true on success; on failure any partially
+ * created Realm is destroyed and false is returned.
+ */
+bool host_create_realm_payload(struct realm *realm_ptr,
+		u_register_t realm_payload_adr,
+		u_register_t plat_mem_pool_adr,
+		u_register_t realm_pages_size,
+		u_register_t feature_flag,
+		const u_register_t *rec_flag,
+		unsigned int rec_count)
+{
+	int8_t value;
+
+	if (realm_payload_adr == TFTF_BASE) {
+		ERROR("realm_payload_adr should be greater than TFTF_BASE\n");
+		return false;
+	}
+
+	if (plat_mem_pool_adr == 0UL ||
+			realm_pages_size == 0UL) {
+		ERROR("plat_mem_pool_size or "
+			"realm_pages_size is NULL\n");
+		return false;
+	}
+
+	if (plat_mem_pool_adr < PAGE_POOL_BASE ||
+			plat_mem_pool_adr + realm_pages_size > NS_REALM_SHARED_MEM_BASE) {
+		ERROR("Invalid pool range\n");
+		return false;
+	}
+
+	INFO("Realm start adr=0x%lx\n", plat_mem_pool_adr);
+
+	/* Initialize Host NS heap memory to be used in Realm creation */
+	if (page_pool_init(plat_mem_pool_adr, realm_pages_size)
+		!= HEAP_INIT_SUCCESS) {
+		ERROR("%s() failed\n", "page_pool_init");
+		return false;
+	}
+	memset((char *)realm_ptr, 0U, sizeof(struct realm));
+
+	/* Read Realm Feature Reg 0 */
+	if (host_rmi_features(0UL, &realm_ptr->rmm_feat_reg0) != REALM_SUCCESS) {
+		ERROR("%s() failed\n", "host_rmi_features");
+		return false;
+	}
+
+	/* Disable PMU if not required */
+	if ((feature_flag & RMI_FEATURE_REGISTER_0_PMU_EN) == 0UL) {
+		realm_ptr->rmm_feat_reg0 &= ~RMI_FEATURE_REGISTER_0_PMU_EN;
+		realm_ptr->pmu_num_ctrs = 0U;
+	} else {
+		/* -1 means "use the platform default from the feature register" */
+		value = EXTRACT(FEATURE_PMU_NUM_CTRS, feature_flag);
+		if (value != -1) {
+			realm_ptr->pmu_num_ctrs = (unsigned int)value;
+		} else {
+			realm_ptr->pmu_num_ctrs =
+				EXTRACT(RMI_FEATURE_REGISTER_0_PMU_NUM_CTRS,
+					realm_ptr->rmm_feat_reg0);
+		}
+	}
+
+	/* Disable SVE if not required */
+	if ((feature_flag & RMI_FEATURE_REGISTER_0_SVE_EN) == 0UL) {
+		realm_ptr->rmm_feat_reg0 &= ~RMI_FEATURE_REGISTER_0_SVE_EN;
+		realm_ptr->sve_vl = 0U;
+	} else {
+		realm_ptr->sve_vl = EXTRACT(FEATURE_SVE_VL, feature_flag);
+	}
+
+	/* Requested number of breakpoints */
+	value = EXTRACT(FEATURE_NUM_BPS, feature_flag);
+	if (value != -1) {
+		realm_ptr->num_bps = (unsigned int)value;
+	} else {
+		realm_ptr->num_bps = EXTRACT(RMI_FEATURE_REGISTER_0_NUM_BPS,
+					realm_ptr->rmm_feat_reg0);
+	}
+
+	/* Requested number of watchpoints */
+	value = EXTRACT(FEATURE_NUM_WPS, feature_flag);
+	if (value != -1) {
+		realm_ptr->num_wps = (unsigned int)value;
+	} else {
+		realm_ptr->num_wps = EXTRACT(RMI_FEATURE_REGISTER_0_NUM_WPS,
+					realm_ptr->rmm_feat_reg0);
+	}
+
+	/* Set SVE bits from feature_flag */
+	realm_ptr->rmm_feat_reg0 &= ~(RMI_FEATURE_REGISTER_0_SVE_EN |
+					MASK(RMI_FEATURE_REGISTER_0_SVE_VL));
+	if ((feature_flag & RMI_FEATURE_REGISTER_0_SVE_EN) != 0UL) {
+		realm_ptr->rmm_feat_reg0 |= RMI_FEATURE_REGISTER_0_SVE_EN |
+				INPLACE(RMI_FEATURE_REGISTER_0_SVE_VL,
+					EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL,
+						feature_flag));
+	}
+
+	/*
+	 * Validate the caller-supplied count, not realm_ptr->rec_count (which
+	 * was just zeroed by the memset above), so an oversized request cannot
+	 * overflow realm_ptr->rec_flag[] below.
+	 */
+	if (rec_count > MAX_REC_COUNT) {
+		ERROR("Invalid Rec Count\n");
+		return false;
+	}
+	realm_ptr->rec_count = rec_count;
+	for (unsigned int i = 0U; i < rec_count; i++) {
+		if (rec_flag[i] == RMI_RUNNABLE ||
+				rec_flag[i] == RMI_NOT_RUNNABLE) {
+			realm_ptr->rec_flag[i] = rec_flag[i];
+		} else {
+			ERROR("Invalid Rec Flag\n");
+			return false;
+		}
+	}
+
+	/*
+	 * At the moment, TFTF does not have support for FEAT_LPA2, so if
+	 * S2SZ is larger than 48 bits, truncate it to ensure we don't surpass
+	 * the maximum IPA size for a realm with no LPA2 support.
+	 */
+	if (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ, realm_ptr->rmm_feat_reg0) > 48U) {
+		realm_ptr->rmm_feat_reg0 &=
+				~MASK(RMI_FEATURE_REGISTER_0_S2SZ);
+		realm_ptr->rmm_feat_reg0 |=
+				INPLACE(RMI_FEATURE_REGISTER_0_S2SZ, 48U);
+	}
+
+	/* Create Realm */
+	if (host_realm_create(realm_ptr) != REALM_SUCCESS) {
+		ERROR("%s() failed\n", "host_realm_create");
+		return false;
+	}
+
+	if (host_realm_init_ipa_state(realm_ptr, 0U, 0U, 1ULL << 32)
+		!= RMI_SUCCESS) {
+		ERROR("%s() failed\n", "host_realm_init_ipa_state");
+		goto destroy_realm;
+	}
+
+	/* RTT map Realm image */
+	if (host_realm_map_payload_image(realm_ptr, realm_payload_adr) !=
+			REALM_SUCCESS) {
+		ERROR("%s() failed\n", "host_realm_map_payload_image");
+		goto destroy_realm;
+	}
+
+	/* Create REC */
+	if (host_realm_rec_create(realm_ptr) != REALM_SUCCESS) {
+		ERROR("%s() failed\n", "host_realm_rec_create");
+		goto destroy_realm;
+	}
+
+	realm_ptr->payload_created = true;
+
+	return true;
+
+	/* Free test resources */
+destroy_realm:
+	if (host_realm_destroy(realm_ptr) != REALM_SUCCESS) {
+		ERROR("%s() failed\n", "host_realm_destroy");
+	}
+	realm_ptr->payload_created = false;
+
+	return false;
+}
+
+bool host_create_activate_realm_payload(struct realm *realm_ptr,
+ u_register_t realm_payload_adr,
+ u_register_t plat_mem_pool_adr,
+ u_register_t realm_pages_size,
+ u_register_t feature_flag,
+ const u_register_t *rec_flag,
+ unsigned int rec_count)
+
+{
+ bool ret;
+
+ ret = host_create_realm_payload(realm_ptr,
+ realm_payload_adr,
+ plat_mem_pool_adr,
+ realm_pages_size,
+ feature_flag,
+ rec_flag,
+ rec_count);
+ if (!ret) {
+ goto destroy_realm;
+ } else {
+ /* Activate Realm */
+ if (host_realm_activate(realm_ptr) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_activate");
+ goto destroy_realm;
+ }
+ }
+ return true;
+
+destroy_realm:
+ if (host_realm_destroy(realm_ptr) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_destroy");
+ }
+ realm_ptr->payload_created = false;
+ return false;
+}
+
+bool host_create_shared_mem(struct realm *realm_ptr,
+ u_register_t ns_shared_mem_adr,
+ u_register_t ns_shared_mem_size)
+{
+ if (ns_shared_mem_adr < NS_REALM_SHARED_MEM_BASE ||
+ ns_shared_mem_adr + ns_shared_mem_size > PAGE_POOL_END) {
+ ERROR("%s() Invalid adr range\n", "host_realm_map_ns_shared");
+ return false;
+ }
+
+ /* RTT map NS shared region */
+ if (host_realm_map_ns_shared(realm_ptr, ns_shared_mem_adr,
+ ns_shared_mem_size) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_map_ns_shared");
+ realm_ptr->shared_mem_created = false;
+ return false;
+ }
+
+ memset((void *)ns_shared_mem_adr, 0, (size_t)ns_shared_mem_size);
+ realm_ptr->host_shared_data = ns_shared_mem_adr;
+ realm_ptr->shared_mem_created = true;
+ host_init_realm_print_buffer(realm_ptr);
+
+ return true;
+}
+
+bool host_destroy_realm(struct realm *realm_ptr)
+{
+ /* Free test resources */
+ page_pool_reset();
+
+ if (!realm_ptr->payload_created) {
+ ERROR("%s() failed\n", "payload_created");
+ return false;
+ }
+
+ realm_ptr->payload_created = false;
+ if (host_realm_destroy(realm_ptr) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_destroy");
+ return false;
+ }
+ memset((char *)realm_ptr, 0U, sizeof(struct realm));
+
+ return true;
+}
+
+/*
+ * Enter Realm and run command passed in 'cmd' and compare the exit reason with
+ * 'test_exit_reason'.
+ *
+ * Returns:
+ * true: On success. 'test_exit_reason' matches Realm exit reason. For
+ * RMI_EXIT_HOST_CALL exit reason, the 'host_call_result' is
+ * TEST_RESULT_SUCCESS.
+ * false: On error.
+ */
+bool host_enter_realm_execute(struct realm *realm_ptr,
+ uint8_t cmd,
+ int test_exit_reason,
+ unsigned int rec_num)
+{
+ u_register_t realm_exit_reason = RMI_EXIT_INVALID;
+ unsigned int host_call_result = TEST_RESULT_FAIL;
+
+ if (realm_ptr == NULL || realm_ptr->payload_created == false) {
+ return false;
+ }
+
+ if (test_exit_reason >= RMI_EXIT_INVALID) {
+ ERROR("Invalid RmiRecExitReason\n");
+ return false;
+ }
+
+ if (rec_num >= realm_ptr->rec_count) {
+ ERROR("Invalid Rec Count\n");
+ return false;
+ }
+ host_shared_data_set_realm_cmd(realm_ptr, cmd, rec_num);
+ if (!host_enter_realm(realm_ptr, &realm_exit_reason, &host_call_result, rec_num)) {
+ return false;
+ }
+
+
+ if (test_exit_reason == realm_exit_reason) {
+ if (realm_exit_reason != RMI_EXIT_HOST_CALL) {
+ return true;
+ } else if (host_call_result == TEST_RESULT_SUCCESS) {
+ return true;
+ }
+ }
+
+ if (realm_exit_reason < RMI_EXIT_INVALID) {
+ if ((realm_exit_reason == RMI_EXIT_HOST_CALL) &&
+ (test_exit_reason == realm_exit_reason)) {
+ ERROR("%s(%u) RMI_EXIT_HOST_CALL failed\n", __func__,
+ cmd);
+ } else {
+ ERROR("%s(%u) Got RMI_EXIT_%s. Expected RMI_EXIT_%s.\n",
+ __func__, cmd, rmi_exit[realm_exit_reason],
+ rmi_exit[test_exit_reason]);
+ }
+ } else {
+ ERROR("%s(%u) Unknown or unsupported RmiRecExitReason: 0x%lx\n",
+ __func__, cmd, realm_exit_reason);
+ }
+ return false;
+}
+
+test_result_t host_cmp_result(void)
+{
+ if (host_rmi_get_cmp_result()) {
+ return TEST_RESULT_SUCCESS;
+ }
+
+ ERROR("RMI registers comparison failed\n");
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * Returns Host core position for specified Rec
+ * Host mpidr is saved on every rec enter
+ */
+static unsigned int host_realm_find_core_pos_by_rec(struct realm *realm_ptr,
+ unsigned int rec_num)
+{
+ if (rec_num < MAX_REC_COUNT && realm_ptr->run[rec_num] != 0U) {
+ return platform_get_core_pos(realm_ptr->host_mpidr[rec_num]);
+ }
+ return (unsigned int)-1;
+}
+
+/*
+ * Send SGI on core running specified Rec
+ * API can be used to forcefully exit from Realm
+ */
+void host_rec_send_sgi(struct realm *realm_ptr,
+ unsigned int sgi,
+ unsigned int rec_num)
+{
+ unsigned int core_pos = host_realm_find_core_pos_by_rec(realm_ptr, rec_num);
+ if (core_pos < PLATFORM_CORE_COUNT) {
+ tftf_send_sgi(sgi, core_pos);
+ }
+}
diff --git a/tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c b/tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c
new file mode 100644
index 000000000..057dd0032
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c
@@ -0,0 +1,1267 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <debug.h>
+#include <heap/page_alloc.h>
+#include <test_helpers.h>
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_rmi.h>
+#include <host_shared_data.h>
+#include <plat/common/platform.h>
+#include <realm_def.h>
+#include <tftf_lib.h>
+
+#define SET_ARG(_n) { \
+ case _n: \
+ regs[_n] = rand64(); \
+ CONC(args->arg, _n) = regs[_n]; \
+ __attribute__((fallthrough)); \
+}
+
+#define CHECK_RET(_n) { \
+ if (CONC(ret_val.ret, _n) != regs[_n]) { \
+ cmp_flag |= (1U << _n); \
+ } \
+}
+
+static bool rmi_cmp_result;
+static unsigned short vmid;
+
+static smc_ret_values host_rmi_handler(smc_args *args, unsigned int in_reg)
+{
+ u_register_t regs[8];
+ smc_ret_values ret_val;
+ unsigned int cmp_flag = 0U;
+
+ assert(args != NULL);
+ assert((in_reg >= 1U) && (in_reg <= 7U));
+
+ /* Function identifier */
+ regs[0] = (u_register_t)args->fid;
+
+ /* X4 and X5 can be passed as parameters */
+ regs[4] = args->arg4;
+ regs[5] = args->arg5;
+
+ /* SMC calls arguments in X1-X7 */
+ switch (in_reg) {
+ SET_ARG(1);
+ SET_ARG(2);
+ SET_ARG(3);
+ SET_ARG(4);
+ SET_ARG(5);
+ SET_ARG(6);
+ default:
+ regs[7] = rand();
+ args->arg7 = regs[7];
+ }
+
+ ret_val = tftf_smc(args);
+
+ /*
+ * According to SMCCC v1.2 X4-X7 registers' values
+ * must be preserved unless they contain result,
+ * as specified in the function definition.
+ */
+ if (regs[0] != RMI_RTT_READ_ENTRY) {
+ CHECK_RET(4);
+ }
+
+ CHECK_RET(5);
+ CHECK_RET(6);
+ CHECK_RET(7);
+
+ if (cmp_flag != 0U) {
+ rmi_cmp_result = false;
+
+ ERROR("RMI SMC 0x%lx corrupted registers: %s %s %s %s\n",
+ regs[0],
+ (((cmp_flag & (1U << 4)) != 0U) ? "X4" : ""),
+ (((cmp_flag & (1U << 5)) != 0U) ? "X5" : ""),
+ (((cmp_flag & (1U << 6)) != 0U) ? "X6" : ""),
+ (((cmp_flag & (1U << 7)) != 0U) ? "X7" : ""));
+ }
+
+ return ret_val;
+}
+
+void host_rmi_init_cmp_result(void)
+{
+ rmi_cmp_result = true;
+}
+
+bool host_rmi_get_cmp_result(void)
+{
+ return rmi_cmp_result;
+}
+
+u_register_t host_rmi_psci_complete(u_register_t calling_rec, u_register_t target_rec,
+ unsigned long status)
+{
+ return (host_rmi_handler(&(smc_args) {RMI_PSCI_COMPLETE, calling_rec,
+ target_rec, status}, 4U)).ret0;
+}
+
+u_register_t host_rmi_data_create(bool unknown,
+ u_register_t rd,
+ u_register_t data,
+ u_register_t map_addr,
+ u_register_t src)
+{
+ if (unknown) {
+ return host_rmi_handler(&(smc_args) {RMI_DATA_CREATE_UNKNOWN,
+ rd, data, map_addr}, 4U).ret0;
+ } else {
+ return host_rmi_handler(&(smc_args) {RMI_DATA_CREATE,
+ /* X5 = flags */
+ rd, data, map_addr, src, 0UL}, 6U).ret0;
+ }
+}
+
+static inline u_register_t host_rmi_realm_activate(u_register_t rd)
+{
+ return host_rmi_handler(&(smc_args) {RMI_REALM_ACTIVATE, rd}, 2U).ret0;
+}
+
+u_register_t host_rmi_realm_create(u_register_t rd, u_register_t params_ptr)
+{
+ return host_rmi_handler(&(smc_args) {RMI_REALM_CREATE, rd, params_ptr},
+ 3U).ret0;
+}
+
+u_register_t host_rmi_realm_destroy(u_register_t rd)
+{
+ return host_rmi_handler(&(smc_args) {RMI_REALM_DESTROY, rd}, 2U).ret0;
+}
+
+u_register_t host_rmi_data_destroy(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t *data,
+ u_register_t *top)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_DATA_DESTROY, rd, map_addr,
+ (u_register_t)&rets}, 4U);
+
+ *data = rets.ret1;
+ *top = rets.ret2;
+ return rets.ret0;
+}
+
+static inline u_register_t host_rmi_rec_create(u_register_t rd,
+ u_register_t rec,
+ u_register_t params_ptr)
+{
+ return host_rmi_handler(&(smc_args) {RMI_REC_CREATE,
+ rd, rec, params_ptr}, 4U).ret0;
+}
+
+static inline u_register_t host_rmi_rec_destroy(u_register_t rec)
+{
+ return host_rmi_handler(&(smc_args) {RMI_REC_DESTROY, rec}, 2U).ret0;
+}
+
+static inline u_register_t host_rmi_rtt_create(u_register_t rd,
+ u_register_t rtt,
+ u_register_t map_addr,
+ u_register_t level)
+{
+ return host_rmi_handler(&(smc_args) {RMI_RTT_CREATE,
+ rd, rtt, map_addr, level}, 5U).ret0;
+}
+
+u_register_t host_rmi_rtt_destroy(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t *rtt,
+ u_register_t *top)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_DESTROY,
+ rd, map_addr, level, (u_register_t)&rets}, 5U);
+ *rtt = rets.ret1;
+ *top = rets.ret2;
+ return rets.ret0;
+}
+
+u_register_t host_rmi_features(u_register_t index, u_register_t *features)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_FEATURES, index}, 2U);
+ *features = rets.ret1;
+ return rets.ret0;
+}
+
+u_register_t host_rmi_rtt_init_ripas(u_register_t rd,
+ u_register_t start,
+ u_register_t end,
+ u_register_t *top)
+
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_INIT_RIPAS,
+ rd, start, end}, 4U);
+ *top = rets.ret1;
+ return rets.ret0;
+}
+
+static inline u_register_t host_rmi_rtt_fold(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t *pa)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_FOLD,
+ rd, map_addr, level, (u_register_t)&rets}, 5U);
+ *pa = rets.ret1;
+ return rets.ret0;
+}
+
+static inline u_register_t host_rmi_rec_aux_count(u_register_t rd,
+ u_register_t *aux_count)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_REC_AUX_COUNT, rd}, 2U);
+ *aux_count = rets.ret1;
+ return rets.ret0;
+}
+
+u_register_t host_rmi_rtt_set_ripas(u_register_t rd,
+ u_register_t rec,
+ u_register_t start,
+ u_register_t end,
+ u_register_t *top)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_SET_RIPAS,
+ rd, rec, start, end}, 5U);
+ *top = rets.ret1;
+ return rets.ret0;
+}
+
+static inline u_register_t host_rmi_rtt_mapunprotected(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t ns_pa)
+{
+ return host_rmi_handler(&(smc_args) {RMI_RTT_MAP_UNPROTECTED,
+ rd, map_addr, level, ns_pa}, 5U).ret0;
+}
+
+u_register_t host_rmi_rtt_readentry(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ struct rtt_entry *rtt)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_READ_ENTRY,
+ rd, map_addr, level}, 4U);
+ rtt->walk_level = rets.ret1;
+ rtt->state = rets.ret2;
+ rtt->out_addr = rets.ret3;
+ rtt->ripas = rets.ret4;
+ return rets.ret0;
+}
+
+u_register_t host_rmi_rtt_unmap_unprotected(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t *top)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_UNMAP_UNPROTECTED,
+ rd, map_addr, level}, 4U);
+ *top = rets.ret1;
+ return rets.ret0;
+}
+
+u_register_t host_rtt_level_mapsize(u_register_t level)
+{
+ if (level > RTT_MAX_LEVEL) {
+ return PAGE_SIZE;
+ }
+
+ return (1UL << RTT_LEVEL_SHIFT(level));
+}
+
+static inline bool ipa_is_ns(u_register_t addr, u_register_t rmm_feat_reg0)
+{
+ return (addr >> (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ, rmm_feat_reg0) - 1UL) == 1UL);
+}
+
+static inline u_register_t host_realm_rtt_create(struct realm *realm,
+ u_register_t addr,
+ u_register_t level,
+ u_register_t phys)
+{
+ addr = ALIGN_DOWN(addr, host_rtt_level_mapsize(level - 1U));
+ return host_rmi_rtt_create(realm->rd, phys, addr, level);
+}
+
+u_register_t host_rmi_create_rtt_levels(struct realm *realm,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t max_level)
+{
+ u_register_t rtt, ret;
+
+ while (level++ < max_level) {
+ rtt = (u_register_t)page_alloc(PAGE_SIZE);
+ if (rtt == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for rtt\n");
+ return REALM_ERROR;
+ } else {
+ ret = host_rmi_granule_delegate(rtt);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rtt=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_delegate", rtt, ret);
+ return REALM_ERROR;
+ }
+ }
+ ret = host_realm_rtt_create(realm, map_addr, level, rtt);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rtt=0x%lx ret=0x%lx\n",
+ "host_realm_rtt_create", rtt, ret);
+ host_rmi_granule_undelegate(rtt);
+ page_free(rtt);
+ return REALM_ERROR;
+ }
+ }
+
+ return REALM_SUCCESS;
+}
+
+static u_register_t host_realm_fold_rtt(u_register_t rd, u_register_t addr,
+ u_register_t level)
+{
+ struct rtt_entry rtt;
+ u_register_t pa, ret;
+
+ ret = host_rmi_rtt_readentry(rd, addr, level, &rtt);
+
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, level=0x%lx addr=0x%lx ret=0x%lx\n",
+ "host_rmi_rtt_readentry", level, addr, ret);
+ return REALM_ERROR;
+ }
+
+ if (rtt.state != RMI_TABLE) {
+ ERROR("%s() failed, rtt.state=%lu\n", "rmi_rtt_readentry",
+ rtt.state);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_rtt_fold(rd, addr, level + 1U, &pa);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_rmi_rtt_fold", addr, ret);
+ return REALM_ERROR;
+ }
+
+ page_free(rtt.out_addr);
+
+ return REALM_SUCCESS;
+
+}
+
+/*
+ * Delegate @map_size bytes of NS memory at @target_pa and map them as
+ * protected Realm data at IPA == @target_pa.
+ *
+ * unknown   - if true, create DATA granules with unknown (unmeasured) content;
+ *             otherwise contents are copied from @src_pa page by page.
+ * map_size  - must be PAGE_SIZE (mapped at RTT level 3) or RTT_L2_BLOCK_SIZE
+ *             (pages mapped individually then folded into an L2 block).
+ *
+ * Missing RTT levels reported via RMI_ERROR_RTT are created on demand and
+ * the DATA create is retried once. On failure, already-created pages are
+ * destroyed and undelegated in reverse order. Returns REALM_SUCCESS or
+ * REALM_ERROR.
+ */
+u_register_t host_realm_delegate_map_protected_data(bool unknown,
+ struct realm *realm,
+ u_register_t target_pa,
+ u_register_t map_size,
+ u_register_t src_pa)
+{
+ u_register_t rd = realm->rd;
+ u_register_t map_level, level;
+ u_register_t ret = 0UL;
+ u_register_t size = 0UL;
+ u_register_t phys = target_pa;
+ u_register_t map_addr = target_pa;
+
+ if (!IS_ALIGNED(map_addr, map_size)) {
+ return REALM_ERROR;
+ }
+
+ switch (map_size) {
+ case PAGE_SIZE:
+ map_level = 3UL;
+ break;
+ case RTT_L2_BLOCK_SIZE:
+ map_level = 2UL;
+ break;
+ default:
+ ERROR("Unknown map_size=0x%lx\n", map_size);
+ return REALM_ERROR;
+ }
+
+ for (size = 0UL; size < map_size; size += PAGE_SIZE) {
+ ret = host_rmi_granule_delegate(phys);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, PA=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_delegate", phys, ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_data_create(unknown, rd, phys, map_addr, src_pa);
+
+ if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+ /* Create missing RTTs and retry */
+ level = RMI_RETURN_INDEX(ret);
+ ret = host_rmi_create_rtt_levels(realm, map_addr, level,
+ map_level);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx line=%u\n",
+ "host_rmi_create_rtt_levels",
+ ret, __LINE__);
+ goto err;
+ }
+
+ ret = host_rmi_data_create(unknown, rd, phys, map_addr,
+ src_pa);
+ }
+
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n",
+ "host_rmi_data_create", ret);
+ goto err;
+ }
+
+ phys += PAGE_SIZE;
+ src_pa += PAGE_SIZE;
+ map_addr += PAGE_SIZE;
+ }
+
+ if (map_size == RTT_L2_BLOCK_SIZE) {
+ /* Collapse the 512 page mappings into a single L2 block */
+ ret = host_realm_fold_rtt(rd, target_pa, map_level);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n",
+ "host_realm_fold_rtt", ret);
+ goto err;
+ }
+ }
+
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n", __func__, ret);
+ goto err;
+ }
+
+ return REALM_SUCCESS;
+
+err:
+ /*
+ * Unwind: walk back over the pages processed so far, destroying the
+ * DATA granule and undelegating the PA.
+ * NOTE(review): on entry here phys/map_addr still point at the page
+ * that failed (not yet advanced), so the first data_destroy targets
+ * a page that may not have been created and is expected to just log
+ * an error — confirm intended.
+ */
+ while (size >= PAGE_SIZE) {
+ u_register_t data, top;
+
+ ret = host_rmi_data_destroy(rd, map_addr, &data, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_rmi_data_destroy", map_addr, ret);
+ }
+
+ ret = host_rmi_granule_undelegate(phys);
+ if (ret != RMI_SUCCESS) {
+ /* Page can't be returned to NS world so is lost */
+ ERROR("%s() failed, ret=0x%lx\n",
+ "host_rmi_granule_undelegate", ret);
+ }
+ phys -= PAGE_SIZE;
+ size -= PAGE_SIZE;
+ map_addr -= PAGE_SIZE;
+ }
+
+ return REALM_ERROR;
+}
+
+/*
+ * Map @map_size bytes of NS memory at PA @ns_pa into the Realm's unprotected
+ * IPA space. The unprotected IPA is formed by setting the top bit of the
+ * Realm's IPA width (S2SZ - 1) on top of the PA. Missing RTT levels are
+ * created on demand and the map is retried once.
+ * map_size must be PAGE_SIZE (level 3) or RTT_L2_BLOCK_SIZE (level 2).
+ * Returns REALM_SUCCESS or REALM_ERROR.
+ */
+u_register_t host_realm_map_unprotected(struct realm *realm,
+ u_register_t ns_pa,
+ u_register_t map_size)
+{
+ u_register_t rd = realm->rd;
+ u_register_t map_level, level;
+ u_register_t ret = 0UL;
+ u_register_t phys = ns_pa;
+ u_register_t map_addr = ns_pa |
+ (1UL << (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm->rmm_feat_reg0) - 1UL));
+
+ if (!IS_ALIGNED(map_addr, map_size)) {
+ return REALM_ERROR;
+ }
+
+ switch (map_size) {
+ case PAGE_SIZE:
+ map_level = 3UL;
+ break;
+ case RTT_L2_BLOCK_SIZE:
+ map_level = 2UL;
+ break;
+ default:
+ ERROR("Unknown map_size=0x%lx\n", map_size);
+ return REALM_ERROR;
+ }
+ /* S2TTE descriptor: PA plus writeback-cacheable RW attributes */
+ u_register_t desc = phys | S2TTE_ATTR_FWB_WB_RW;
+
+ ret = host_rmi_rtt_mapunprotected(rd, map_addr, map_level, desc);
+
+ if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+ /* Create missing RTTs and retry */
+ level = RMI_RETURN_INDEX(ret);
+ ret = host_rmi_create_rtt_levels(realm, map_addr, level,
+ map_level);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx line=%u\n",
+ "host_rmi_create_rtt_levels", ret, __LINE__);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_rtt_mapunprotected(rd, map_addr, map_level,
+ desc);
+ }
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n", "host_rmi_rtt_mapunprotected",
+ ret);
+ return REALM_ERROR;
+ }
+
+ return REALM_SUCCESS;
+}
+
+/*
+ * Destroy the RTT at @level that maps @addr. The address is aligned down to
+ * the map size of the parent level (level - 1) as required by RMI_RTT_DESTROY.
+ * On success *rtt holds the PA of the destroyed table and *top the next
+ * address to walk. Returns the raw RMI status.
+ */
+static u_register_t host_realm_rtt_destroy(struct realm *realm,
+ u_register_t addr,
+ u_register_t level,
+ u_register_t *rtt,
+ u_register_t *top)
+{
+ addr = ALIGN_DOWN(addr, host_rtt_level_mapsize(level - 1U));
+ return host_rmi_rtt_destroy(realm->rd, addr, level, rtt, top);
+}
+
+/*
+ * Destroy the RTT at @level covering @addr, then undelegate and free the
+ * granule (@rtt_granule) that backed it.
+ * Returns REALM_SUCCESS on success, REALM_ERROR otherwise.
+ */
+static u_register_t host_realm_destroy_free_rtt(struct realm *realm,
+ u_register_t addr,
+ u_register_t level,
+ u_register_t rtt_granule)
+{
+ u_register_t rtt, top, ret;
+
+ ret = host_realm_rtt_destroy(realm, addr, level, &rtt, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n",
+ "host_realm_rtt_destroy", ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_granule_undelegate(rtt_granule);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rtt=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", rtt_granule, ret);
+ return REALM_ERROR;
+ }
+
+ page_free(rtt_granule);
+ return REALM_SUCCESS;
+}
+
+/*
+ * For a protected range: destroy each DATA granule mapped at @ipa, then
+ * undelegate and free the backing PA starting at @addr, page by page over
+ * @size bytes. Stops and returns REALM_ERROR on the first RMI failure;
+ * returns REALM_SUCCESS when the whole range is torn down.
+ */
+static u_register_t host_realm_destroy_undelegate_range(struct realm *realm,
+ u_register_t ipa,
+ u_register_t addr,
+ u_register_t size)
+{
+ u_register_t rd = realm->rd;
+ u_register_t ret;
+ u_register_t data, top;
+
+ while (size >= PAGE_SIZE) {
+ ret = host_rmi_data_destroy(rd, ipa, &data, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_rmi_data_destroy", ipa, ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_granule_undelegate(addr);
+ if (ret != RMI_SUCCESS) {
+ /* Report the PA that failed to undelegate, not the IPA */
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", addr, ret);
+ return REALM_ERROR;
+ }
+
+ page_free(addr);
+
+ addr += PAGE_SIZE;
+ ipa += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ return REALM_SUCCESS;
+}
+
+/*
+ * Recursively tear down the RTT hierarchy for IPA range [@start, @end) at
+ * @level. For each entry: unmap unprotected mappings, destroy/undelegate
+ * protected data ranges, and recurse into child tables before destroying
+ * and freeing them. Read failures are skipped (entry treated as absent).
+ * Returns REALM_SUCCESS or REALM_ERROR.
+ */
+static u_register_t host_realm_tear_down_rtt_range(struct realm *realm,
+ u_register_t level,
+ u_register_t start,
+ u_register_t end)
+{
+ u_register_t rd = realm->rd;
+ u_register_t map_size = host_rtt_level_mapsize(level);
+ u_register_t map_addr, next_addr, rtt_out_addr, end_addr, top;
+ struct rtt_entry rtt;
+ u_register_t ret;
+
+ for (map_addr = start; map_addr < end; map_addr = next_addr) {
+ /* Advance entry by entry at this level, clipped to @end */
+ next_addr = ALIGN(map_addr + 1U, map_size);
+ end_addr = MIN(next_addr, end);
+
+ ret = host_rmi_rtt_readentry(rd, ALIGN_DOWN(map_addr, map_size),
+ level, &rtt);
+ if (ret != RMI_SUCCESS) {
+ continue;
+ }
+
+ rtt_out_addr = rtt.out_addr;
+
+ switch (rtt.state) {
+ case RMI_ASSIGNED:
+ if (ipa_is_ns(map_addr, realm->rmm_feat_reg0)) {
+
+ ret = host_rmi_rtt_unmap_unprotected(
+ rd,
+ map_addr,
+ level,
+ &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_rmi_rtt_unmap_unprotected",
+ map_addr, ret);
+ return REALM_ERROR;
+ }
+ } else {
+ ret = host_realm_destroy_undelegate_range(
+ realm,
+ map_addr,
+ rtt_out_addr,
+ map_size);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_realm_destroy_undelegate_range",
+ map_addr, ret);
+ return REALM_ERROR;
+ }
+ }
+ break;
+ case RMI_UNASSIGNED:
+ break;
+ case RMI_TABLE:
+ /* Depth-first: empty the child table, then destroy it */
+ ret = host_realm_tear_down_rtt_range(realm, level + 1U,
+ map_addr,
+ end_addr);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, map_addr=0x%lx ret=0x%lx\n",
+ "host_realm_tear_down_rtt_range",
+ map_addr, ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_realm_destroy_free_rtt(realm, map_addr,
+ level + 1U,
+ rtt_out_addr);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, map_addr=0x%lx ret=0x%lx\n",
+ "host_realm_destroy_free_rtt",
+ map_addr, ret);
+ return REALM_ERROR;
+ }
+ break;
+ default:
+ return REALM_ERROR;
+ }
+ }
+
+ return REALM_SUCCESS;
+}
+
+/* Delegate the granule at @addr to the Realm world; returns RMI status. */
+u_register_t host_rmi_granule_delegate(u_register_t addr)
+{
+ return host_rmi_handler(&(smc_args) {RMI_GRANULE_DELEGATE, addr}, 2U).ret0;
+}
+
+/* Return the granule at @addr to the Non-secure world; returns RMI status. */
+u_register_t host_rmi_granule_undelegate(u_register_t addr)
+{
+ return host_rmi_handler(&(smc_args) {RMI_GRANULE_UNDELEGATE, addr}, 2U).ret0;
+}
+
+/*
+ * Query the RMM for its RMI ABI version, requesting @requested_ver.
+ * Returns SMC_UNKNOWN if the call is not implemented, otherwise the lower
+ * supported version reported by the RMM.
+ */
+u_register_t host_rmi_version(u_register_t requested_ver)
+{
+ smc_ret_values ret;
+
+ ret = host_rmi_handler(&(smc_args) {RMI_VERSION, requested_ver}, 2U);
+ if (ret.ret0 == (u_register_t)SMC_UNKNOWN) {
+ return SMC_UNKNOWN;
+ }
+ /* Return lower version. */
+ return ret.ret1;
+}
+
+/*
+ * Create a Realm: allocate the PAR (payload image area), delegate an RD and
+ * a level-0 RTT granule, populate rmi_realm_params from the cached feature
+ * register (S2 size, SVE, PMU, breakpoints/watchpoints), issue
+ * RMI_REALM_CREATE and query the REC auxiliary granule count.
+ * On success the realm state becomes REALM_STATE_NEW and REALM_SUCCESS is
+ * returned; on failure all acquired resources are unwound via the labelled
+ * error path and REALM_ERROR is returned.
+ */
+u_register_t host_realm_create(struct realm *realm)
+{
+ struct rmi_realm_params *params;
+ u_register_t ret;
+
+ realm->par_size = REALM_MAX_LOAD_IMG_SIZE;
+
+ realm->state = REALM_STATE_NULL;
+ /*
+ * Allocate memory for PAR - Realm image. Granule delegation
+ * of PAR will be performed during rtt creation.
+ */
+ realm->par_base = (u_register_t)page_alloc(realm->par_size);
+ if (realm->par_base == HEAP_NULL_PTR) {
+ ERROR("page_alloc failed, base=0x%lx, size=0x%lx\n",
+ realm->par_base, realm->par_size);
+ return REALM_ERROR;
+ }
+
+ /* Allocate and delegate RD */
+ realm->rd = (u_register_t)page_alloc(PAGE_SIZE);
+ if (realm->rd == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for rd\n");
+ goto err_free_par;
+ } else {
+ ret = host_rmi_granule_delegate(realm->rd);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_delegate", realm->rd, ret);
+ goto err_free_rd;
+ }
+ }
+
+ /* Allocate and delegate RTT */
+ realm->rtt_addr = (u_register_t)page_alloc(PAGE_SIZE);
+ if (realm->rtt_addr == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for rtt_addr\n");
+ goto err_undelegate_rd;
+ } else {
+ ret = host_rmi_granule_delegate(realm->rtt_addr);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rtt_addr=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_delegate", realm->rtt_addr, ret);
+ goto err_free_rtt;
+ }
+ }
+
+ /* Allocate memory for params */
+ params = (struct rmi_realm_params *)page_alloc(PAGE_SIZE);
+ if (params == NULL) {
+ ERROR("Failed to allocate memory for params\n");
+ goto err_undelegate_rtt;
+ }
+
+ /* Populate params */
+ params->s2sz = EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm->rmm_feat_reg0);
+ params->num_bps = realm->num_bps;
+ params->num_wps = realm->num_wps;
+
+ /* SVE enable and vector length */
+ if ((realm->rmm_feat_reg0 & RMI_FEATURE_REGISTER_0_SVE_EN) != 0UL) {
+ params->flags = RMI_REALM_FLAGS_SVE;
+ params->sve_vl = realm->sve_vl;
+ } else {
+ params->flags = 0UL;
+ params->sve_vl = 0U;
+ }
+
+ /* PMU enable and number of event counters */
+ if ((realm->rmm_feat_reg0 & RMI_FEATURE_REGISTER_0_PMU_EN) != 0UL) {
+ params->flags |= RMI_REALM_FLAGS_PMU;
+ params->pmu_num_ctrs = realm->pmu_num_ctrs;
+ } else {
+ params->pmu_num_ctrs = 0U;
+ }
+
+ params->hash_algo = RMI_HASH_SHA_256;
+ params->vmid = vmid++;
+ params->rtt_base = realm->rtt_addr;
+ params->rtt_level_start = 0L;
+ params->rtt_num_start = 1U;
+
+ /* Create Realm */
+ ret = host_rmi_realm_create(realm->rd, (u_register_t)params);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_realm_create", realm->rd, ret);
+ goto err_free_params;
+ }
+
+ realm->vmid = params->vmid;
+ ret = host_rmi_rec_aux_count(realm->rd, &realm->num_aux);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_rec_aux_count", realm->rd, ret);
+ host_rmi_realm_destroy(realm->rd);
+ goto err_free_params;
+ }
+
+ realm->state = REALM_STATE_NEW;
+
+ /* Free params */
+ page_free((u_register_t)params);
+ return REALM_SUCCESS;
+
+/* Error unwind: release resources in reverse order of acquisition */
+err_free_params:
+ page_free((u_register_t)params);
+
+err_undelegate_rtt:
+ ret = host_rmi_granule_undelegate(realm->rtt_addr);
+ if (ret != RMI_SUCCESS) {
+ WARN("%s() failed, rtt_addr=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rtt_addr, ret);
+ }
+
+err_free_rtt:
+ page_free(realm->rtt_addr);
+
+err_undelegate_rd:
+ ret = host_rmi_granule_undelegate(realm->rd);
+ if (ret != RMI_SUCCESS) {
+ WARN("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rd, ret);
+ }
+err_free_rd:
+ page_free(realm->rd);
+
+err_free_par:
+ page_free(realm->par_base);
+
+ return REALM_ERROR;
+}
+
+/*
+ * Copy and map the Realm payload image from @realm_payload_adr into the
+ * Realm's PAR, one measured (known-content) page at a time.
+ * Returns REALM_SUCCESS or REALM_ERROR.
+ */
+u_register_t host_realm_map_payload_image(struct realm *realm,
+ u_register_t realm_payload_adr)
+{
+ u_register_t src_pa = realm_payload_adr;
+ u_register_t i = 0UL;
+ u_register_t ret;
+
+ /* MAP image regions */
+ while (i < (realm->par_size / PAGE_SIZE)) {
+ ret = host_realm_delegate_map_protected_data(false, realm,
+ realm->par_base + i * PAGE_SIZE,
+ PAGE_SIZE,
+ src_pa + i * PAGE_SIZE);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, par_base=0x%lx ret=0x%lx\n",
+ "host_realm_map_protected_data",
+ realm->par_base, ret);
+ return REALM_ERROR;
+ }
+ i++;
+ }
+
+ return REALM_SUCCESS;
+}
+
+/*
+ * Initialise the RIPAS of IPA range [@start, @end), creating intermediate
+ * RTT levels on demand. While the RMM reports RMI_ERROR_RTT, either the
+ * missing levels up to @level are created and the call retried, or @level
+ * is walked down one step; fails once level exceeds RTT_MAX_LEVEL.
+ * Returns RMI_SUCCESS or REALM_ERROR.
+ */
+u_register_t host_realm_init_ipa_state(struct realm *realm, u_register_t level,
+ u_register_t start, uint64_t end)
+{
+ u_register_t rd = realm->rd, ret;
+ u_register_t top;
+
+ do {
+ if (level > RTT_MAX_LEVEL) {
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_rtt_init_ripas(rd, start, end, &top);
+ if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+ /* The fault level at which the RTT walk stopped */
+ int cur_level = RMI_RETURN_INDEX(ret);
+
+ if (cur_level < level) {
+ ret = host_rmi_create_rtt_levels(realm,
+ start,
+ cur_level,
+ level);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx line=%u\n",
+ "host_rmi_create_rtt_levels",
+ ret, __LINE__);
+ return REALM_ERROR;
+ }
+
+ /* Retry with the RTT levels in place */
+ continue;
+ }
+ }
+
+ /* Retry on the next level */
+ level++;
+
+ } while (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT);
+
+ return ret == RMI_SUCCESS ? RMI_SUCCESS : REALM_ERROR;
+}
+
+/*
+ * Map the NS shared buffer at @ns_shared_mem_adr (@ns_shared_mem_size bytes)
+ * into the Realm's unprotected IPA space page by page, and record the
+ * resulting IPA and size in the realm object.
+ * Returns REALM_SUCCESS or REALM_ERROR.
+ */
+u_register_t host_realm_map_ns_shared(struct realm *realm,
+ u_register_t ns_shared_mem_adr,
+ u_register_t ns_shared_mem_size)
+{
+ u_register_t i = 0UL;
+ u_register_t ret;
+
+ /* Unprotected IPA: PA with the top IPA bit (S2SZ - 1) set */
+ realm->ipa_ns_buffer = ns_shared_mem_adr |
+ (1UL << (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm->rmm_feat_reg0) - 1));
+ realm->ns_buffer_size = ns_shared_mem_size;
+ /* MAP SHARED_NS region */
+ while (i < ns_shared_mem_size / PAGE_SIZE) {
+ ret = host_realm_map_unprotected(realm, ns_shared_mem_adr +
+ (i * PAGE_SIZE), PAGE_SIZE);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, par_base=0x%lx ret=0x%lx\n",
+ "host_realm_map_unprotected",
+ (ns_shared_mem_adr + i * PAGE_SIZE), ret);
+ return REALM_ERROR;
+ }
+ i++;
+ }
+ return REALM_SUCCESS;
+}
+
+/*
+ * Free AUX pages for rec0 to rec_num (inclusive). Each non-zero aux page is
+ * undelegated (failure only warns) and returned to the heap; a zero entry
+ * ends the scan for that REC since pages are populated contiguously.
+ */
+static void host_realm_free_rec_aux(u_register_t
+ (*aux_pages)[REC_PARAMS_AUX_GRANULES],
+ unsigned int num_aux, unsigned int rec_num)
+{
+ u_register_t ret;
+
+ assert(rec_num < MAX_REC_COUNT);
+ assert(num_aux <= REC_PARAMS_AUX_GRANULES);
+ for (unsigned int i = 0U; i <= rec_num; i++) {
+ for (unsigned int j = 0U; j < num_aux &&
+ aux_pages[i][j] != 0U; j++) {
+ ret = host_rmi_granule_undelegate(aux_pages[i][j]);
+ if (ret != RMI_SUCCESS) {
+ WARN("%s() failed, index=%u,%u ret=0x%lx\n",
+ "host_rmi_granule_undelegate", i, j, ret);
+ }
+ page_free(aux_pages[i][j]);
+ }
+ }
+}
+
+/*
+ * Allocate and delegate the realm->num_aux auxiliary granules required by
+ * REC number @rec_num, recording them both in @params->aux and in
+ * realm->aux_pages_all_rec for later teardown.
+ * Returns RMI_SUCCESS or RMI_ERROR_REALM.
+ */
+static u_register_t host_realm_alloc_rec_aux(struct realm *realm,
+ struct rmi_rec_params *params, u_register_t rec_num)
+{
+ u_register_t ret;
+ unsigned int j;
+
+ assert(rec_num < MAX_REC_COUNT);
+ for (j = 0U; j < realm->num_aux; j++) {
+ params->aux[j] = (u_register_t)page_alloc(PAGE_SIZE);
+ if (params->aux[j] == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for aux rec\n");
+ return RMI_ERROR_REALM;
+ }
+ ret = host_rmi_granule_delegate(params->aux[j]);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, index=%u ret=0x%lx\n",
+ "host_rmi_granule_delegate", j, ret);
+ /*
+ * Free current page,
+ * prev pages freed at host_realm_free_rec_aux
+ */
+ page_free(params->aux[j]);
+ params->aux[j] = 0UL;
+ return RMI_ERROR_REALM;
+ }
+
+ /* We need a copy in Realm object for final destruction */
+ realm->aux_pages_all_rec[rec_num][j] = params->aux[j];
+ }
+ return RMI_SUCCESS;
+}
+
+/*
+ * Create realm->rec_count RECs: for each one allocate a run object,
+ * allocate and delegate the REC granule and its auxiliary granules, then
+ * issue RMI_REC_CREATE with MPIDR == REC index.
+ * On failure, RECs created so far are undelegated and freed.
+ * Returns REALM_SUCCESS or REALM_ERROR.
+ */
+u_register_t host_realm_rec_create(struct realm *realm)
+{
+ struct rmi_rec_params *rec_params;
+ u_register_t ret;
+ unsigned int i;
+
+ /* Reset per-REC bookkeeping before (re)creation */
+ for (i = 0U; i < realm->rec_count; i++) {
+ realm->run[i] = 0U;
+ realm->rec[i] = 0U;
+ realm->mpidr[i] = 0U;
+ }
+ (void)memset(realm->aux_pages_all_rec, 0x0, sizeof(u_register_t) *
+ realm->num_aux*realm->rec_count);
+
+ /* Allocate memory for rec_params */
+ rec_params = (struct rmi_rec_params *)page_alloc(PAGE_SIZE);
+ if (rec_params == NULL) {
+ ERROR("Failed to allocate memory for rec_params\n");
+ return REALM_ERROR;
+ }
+
+ for (i = 0U; i < realm->rec_count; i++) {
+ (void)memset(rec_params, 0x0, PAGE_SIZE);
+
+ /* Allocate memory for run object */
+ realm->run[i] = (u_register_t)page_alloc(PAGE_SIZE);
+ if (realm->run[i] == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for run\n");
+ goto err_free_mem;
+ }
+ (void)memset((void *)realm->run[i], 0x0, PAGE_SIZE);
+
+ /* Allocate and delegate REC */
+ realm->rec[i] = (u_register_t)page_alloc(PAGE_SIZE);
+ if (realm->rec[i] == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for REC\n");
+ goto err_free_mem;
+ } else {
+ ret = host_rmi_granule_delegate(realm->rec[i]);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rec=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_delegate", realm->rd, ret);
+ goto err_free_mem;
+ }
+ }
+
+ /* Delegate the required number of auxiliary Granules */
+ ret = host_realm_alloc_rec_aux(realm, rec_params, i);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n", "host_realm_alloc_rec_aux",
+ ret);
+ goto err_free_aux;
+ }
+
+ /* Populate rec_params */
+ rec_params->pc = realm->par_base;
+ rec_params->flags = realm->rec_flag[i];
+
+ /* MPIDR is simply the REC index */
+ rec_params->mpidr = (u_register_t)i;
+ rec_params->num_aux = realm->num_aux;
+ realm->mpidr[i] = (u_register_t)i;
+
+ /* Create REC */
+ ret = host_rmi_rec_create(realm->rd, realm->rec[i],
+ (u_register_t)rec_params);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed,index=%u, ret=0x%lx\n",
+ "host_rmi_rec_create", i, ret);
+ goto err_free_aux;
+ }
+ }
+ /* Free rec_params */
+ page_free((u_register_t)rec_params);
+ return REALM_SUCCESS;
+
+err_free_aux:
+ host_realm_free_rec_aux(realm->aux_pages_all_rec, realm->num_aux, i);
+
+err_free_mem:
+ /* Undelegate and free every REC/run allocated so far (0..i) */
+ for (unsigned int j = 0U; j <= i ; j++) {
+ ret = host_rmi_granule_undelegate(realm->rec[j]);
+ if (ret != RMI_SUCCESS) {
+ WARN("%s() failed, rec=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rec[j], ret);
+ }
+ page_free(realm->run[j]);
+ page_free(realm->rec[j]);
+ }
+ page_free((u_register_t)rec_params);
+ return REALM_ERROR;
+}
+
+/*
+ * Transition the Realm to the ACTIVE state via RMI_REALM_ACTIVATE and
+ * update the cached realm state on success.
+ * Returns REALM_SUCCESS or REALM_ERROR.
+ */
+u_register_t host_realm_activate(struct realm *realm)
+{
+ u_register_t ret;
+
+ /* Activate Realm */
+ ret = host_rmi_realm_activate(realm->rd);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n", "host_rmi_realm_activate",
+ ret);
+ return REALM_ERROR;
+ }
+
+ realm->state = REALM_STATE_ACTIVE;
+
+ return REALM_SUCCESS;
+}
+
+/*
+ * Destroy the Realm and release all its resources: RECs and their aux
+ * granules, the RTT hierarchy for both the protected and (if created) the
+ * NS-shared IPA ranges, then the RD, level-0 RTT and PAR.
+ * A realm that was never created (REALM_STATE_NULL) is a no-op success.
+ * Returns REALM_SUCCESS or REALM_ERROR.
+ */
+u_register_t host_realm_destroy(struct realm *realm)
+{
+ u_register_t ret;
+
+ if (realm->state == REALM_STATE_NULL) {
+ return REALM_SUCCESS;
+ }
+
+ /* For each REC - Destroy, undelegate and free */
+ for (unsigned int i = 0U; i < realm->rec_count; i++) {
+ if (realm->rec[i] == 0U) {
+ break;
+ }
+
+ ret = host_rmi_rec_destroy(realm->rec[i]);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rec=0x%lx ret=0x%lx\n",
+ "host_rmi_rec_destroy", realm->rec[i], ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_granule_undelegate(realm->rec[i]);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rec=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rec[i], ret);
+ return REALM_ERROR;
+ }
+
+ page_free(realm->rec[i]);
+
+ /* Free run object */
+ page_free(realm->run[i]);
+ }
+
+ host_realm_free_rec_aux(realm->aux_pages_all_rec,
+ realm->num_aux, realm->rec_count - 1U);
+
+ /*
+ * For each data granule - Destroy, undelegate and free
+ * RTTs (level 1U and below) must be destroyed leaf-upwards,
+ * using RMI_DATA_DESTROY, RMI_RTT_DESTROY and RMI_GRANULE_UNDELEGATE
+ * commands.
+ */
+ if (host_realm_tear_down_rtt_range(realm, 0UL, 0UL,
+ (1UL << (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm->rmm_feat_reg0) - 1))) != RMI_SUCCESS) {
+ ERROR("host_realm_tear_down_rtt_range() line=%u\n", __LINE__);
+ return REALM_ERROR;
+ }
+ if (realm->shared_mem_created == true) {
+ /* Also unwind the unprotected (NS shared buffer) IPA range */
+ if (host_realm_tear_down_rtt_range(realm, 0UL, realm->ipa_ns_buffer,
+ (realm->ipa_ns_buffer + realm->ns_buffer_size)) !=
+ RMI_SUCCESS) {
+ ERROR("host_realm_tear_down_rtt_range() line=%u\n", __LINE__);
+ return REALM_ERROR;
+ }
+ }
+
+ /*
+ * RD Destroy, undelegate and free
+ * RTT(L0) undelegate and free
+ * PAR free
+ */
+ ret = host_rmi_realm_destroy(realm->rd);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_realm_destroy", realm->rd, ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_granule_undelegate(realm->rd);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rd, ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_granule_undelegate(realm->rtt_addr);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rtt_addr=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rtt_addr, ret);
+ return REALM_ERROR;
+ }
+
+ page_free(realm->rd);
+ page_free(realm->rtt_addr);
+ page_free(realm->par_base);
+
+ return REALM_SUCCESS;
+}
+
+/*
+ * Find the REC index whose recorded MPIDR matches @mpidr.
+ * Returns the index, or MAX_REC_COUNT if no active REC matches.
+ */
+unsigned int host_realm_find_rec_by_mpidr(unsigned int mpidr, struct realm *realm)
+{
+ for (unsigned int i = 0U; i < MAX_REC_COUNT; i++) {
+ if (realm->run[i] != 0U && realm->mpidr[i] == mpidr) {
+ return i;
+ }
+ }
+ return MAX_REC_COUNT;
+}
+
+/*
+ * Enter REC @rec_num of @realm via RMI_REC_ENTER and service HOST_CALL
+ * exits in a loop: shared-buffer queries and print requests re-enter the
+ * REC; SUCCESS/FAILED commands record the test result in
+ * *host_call_result and stop re-entering.
+ * On return, *exit_reason holds the last RMI exit reason and the RMI
+ * status of the final REC_ENTER is returned (RMI_ERROR_INPUT for an
+ * out-of-range @rec_num).
+ */
+u_register_t host_realm_rec_enter(struct realm *realm,
+ u_register_t *exit_reason,
+ unsigned int *host_call_result,
+ unsigned int rec_num)
+{
+ struct rmi_rec_run *run;
+ u_register_t ret;
+ bool re_enter_rec;
+
+ if (rec_num >= realm->rec_count) {
+ return RMI_ERROR_INPUT;
+ }
+
+ run = (struct rmi_rec_run *)realm->run[rec_num];
+ realm->host_mpidr[rec_num] = read_mpidr_el1();
+ do {
+ re_enter_rec = false;
+ ret = host_rmi_handler(&(smc_args) {RMI_REC_ENTER,
+ realm->rec[rec_num], realm->run[rec_num]}, 3U).ret0;
+ VERBOSE("%s() ret=%lu run->exit.exit_reason=%lu "
+ "run->exit.esr=0x%lx EC_BITS=%u ISS_DFSC_MASK=0x%lx\n",
+ __func__, ret, run->exit.exit_reason, run->exit.esr,
+ ((EC_BITS(run->exit.esr) == EC_DABORT_CUR_EL)),
+ (ISS_BITS(run->exit.esr) & ISS_DFSC_MASK));
+
+ /* If a data abort because of a GPF */
+ if (EC_BITS(run->exit.esr) == EC_DABORT_CUR_EL) {
+ ERROR("EC_BITS(run->exit.esr) == EC_DABORT_CUR_EL\n");
+ if ((ISS_BITS(run->exit.esr) & ISS_DFSC_MASK) ==
+ DFSC_GPF_DABORT) {
+ ERROR("DFSC_GPF_DABORT\n");
+ }
+ }
+
+ if (ret != RMI_SUCCESS) {
+ return ret;
+ }
+
+ if (run->exit.exit_reason == RMI_EXIT_HOST_CALL) {
+ switch (run->exit.imm) {
+ case HOST_CALL_GET_SHARED_BUFF_CMD:
+ run->entry.gprs[0] = realm->ipa_ns_buffer;
+ re_enter_rec = true;
+ break;
+ case HOST_CALL_EXIT_PRINT_CMD:
+ realm_print_handler(realm, run->exit.gprs[0]);
+ re_enter_rec = true;
+ break;
+ case HOST_CALL_EXIT_SUCCESS_CMD:
+ *host_call_result = TEST_RESULT_SUCCESS;
+ break;
+ case HOST_CALL_EXIT_FAILED_CMD:
+ *host_call_result = TEST_RESULT_FAIL;
+ break;
+ default:
+ break;
+ }
+ }
+ } while (re_enter_rec);
+
+ *exit_reason = run->exit.exit_reason;
+ return ret;
+}
diff --git a/tftf/tests/runtime_services/host_realm_managment/host_shared_data.c b/tftf/tests/runtime_services/host_realm_managment/host_shared_data.c
new file mode 100644
index 000000000..b3bfdae10
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/host_shared_data.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <cassert.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_rmi.h>
+#include <host_shared_data.h>
+
+/*
+ * Currently we support only creation of a single Realm in TFTF.
+ * Hence we can assume that Shared area should be sufficient for all
+ * the RECs of this Realm.
+ */
+CASSERT(NS_REALM_SHARED_MEM_SIZE > (MAX_REC_COUNT *
+ sizeof(host_shared_data_t)),
+ too_small_realm_shared_mem_size);
+
+/*
+ * Return shared buffer pointer mapped as host_shared_data_t structure
+ * for REC @rec_num (one slot per REC within the realm's shared area).
+ */
+host_shared_data_t *host_get_shared_structure(struct realm *realm_ptr, unsigned int rec_num)
+{
+ host_shared_data_t *host_shared_data;
+
+ assert(realm_ptr != NULL);
+ assert(rec_num < MAX_REC_COUNT);
+ host_shared_data = (host_shared_data_t *)realm_ptr->host_shared_data;
+ return &host_shared_data[rec_num];
+}
+
+/*
+ * Set data to be shared from Host to realm: store @val at parameter slot
+ * @index of REC @rec_num's shared structure.
+ */
+void host_shared_data_set_host_val(struct realm *realm_ptr,
+ unsigned int rec_num, uint8_t index, u_register_t val)
+{
+ host_shared_data_t *host_shared_data;
+
+ assert(realm_ptr != NULL);
+ assert(rec_num < MAX_REC_COUNT);
+ assert(index < MAX_DATA_SIZE);
+ host_shared_data = (host_shared_data_t *)realm_ptr->host_shared_data;
+ host_shared_data[rec_num].host_param_val[index] = val;
+}
+
+/*
+ * Return data shared by realm in realm_out_val: read output slot @index of
+ * REC @rec_num's shared structure.
+ */
+u_register_t host_shared_data_get_realm_val(struct realm *realm_ptr,
+ unsigned int rec_num, uint8_t index)
+{
+ host_shared_data_t *host_shared_data;
+
+ assert(realm_ptr != NULL);
+ assert(rec_num < MAX_REC_COUNT);
+ assert(index < MAX_DATA_SIZE);
+ host_shared_data = (host_shared_data_t *)realm_ptr->host_shared_data;
+ return host_shared_data[rec_num].realm_out_val[index];
+}
+
+/*
+ * Set the command to be sent from Host to the realm payload running on
+ * REC @rec_num.
+ */
+void host_shared_data_set_realm_cmd(struct realm *realm_ptr,
+ uint8_t cmd, unsigned int rec_num)
+{
+ host_shared_data_t *host_shared_data;
+
+ assert(realm_ptr != NULL);
+ assert(rec_num < MAX_REC_COUNT);
+ host_shared_data = (host_shared_data_t *)realm_ptr->host_shared_data;
+ host_shared_data[rec_num].realm_cmd = cmd;
+}
+
diff --git a/tftf/tests/runtime_services/host_realm_managment/rmi_delegate_tests.c b/tftf/tests/runtime_services/host_realm_managment/rmi_delegate_tests.c
new file mode 100644
index 000000000..ce604c0da
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/rmi_delegate_tests.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <arch_features.h>
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_shared_data.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include "rmi_spm_tests.h"
+#include <test_helpers.h>
+
+static test_result_t host_realm_multi_cpu_payload_test(void);
+static test_result_t host_realm_multi_cpu_payload_del_undel(void);
+
+/* Buffer to delegate and undelegate */
+static char bufferdelegate[NUM_GRANULES * GRANULE_SIZE * PLATFORM_CORE_COUNT]
+ __aligned(GRANULE_SIZE);
+static char bufferstate[NUM_GRANULES * PLATFORM_CORE_COUNT];
+
+/*
+ * Overall test for realm payload in three sections:
+ * 1. Single CPU version check: SMC call to realm payload to return
+ * version information
+ * 2. Multi CPU version check: SMC call to realm payload to return
+ * version information from all CPU's in system
+ * 3. Delegate and Undelegate Non-Secure granule via
+ * SMC call to realm payload
+ * 4. Multi CPU delegation where random assignment of states
+ * (realm, non-secure)is assigned to a set of granules.
+ * Each CPU is given a number of granules to delegate in
+ * parallel with the other CPU's
+ * 5. Fail testing of delegation parameters such as
+ * attempting to perform a delegation on the same granule
+ * twice and then testing a misaligned address
+ */
+
+/*
+ * Randomly delegate roughly half of the shared granule buffer, recording
+ * each granule's resulting state in bufferstate[] for later per-CPU
+ * delegate/undelegate stress runs.
+ * Returns the comparison result, or TEST_RESULT_FAIL on a delegate error.
+ */
+test_result_t host_init_buffer_del(void)
+{
+ u_register_t retrmm;
+
+ host_rmi_init_cmp_result();
+
+ for (uint32_t i = 0; i < (NUM_GRANULES * PLATFORM_CORE_COUNT) ; i++) {
+ if ((rand() % 2) == 0) {
+ retrmm = host_rmi_granule_delegate(
+ (u_register_t)&bufferdelegate[i * GRANULE_SIZE]);
+ bufferstate[i] = B_DELEGATED;
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Delegate operation returns 0x%lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ } else {
+ bufferstate[i] = B_UNDELEGATED;
+ }
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * Single CPU version check function
+ */
+/*
+ * Single CPU version check function: query the RMM's RMI ABI version on the
+ * lead CPU and print it. Skipped when FEAT_RME is not implemented.
+ */
+test_result_t host_realm_version_single_cpu(void)
+{
+ u_register_t retrmm = 0U;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ host_rmi_init_cmp_result();
+
+ retrmm = host_rmi_version(RMI_ABI_VERSION_VAL);
+
+ tftf_testcase_printf("RMM version is: %lu.%lu (expected: %u.%u)\n",
+ RMI_ABI_VERSION_GET_MAJOR(retrmm),
+ RMI_ABI_VERSION_GET_MINOR(retrmm),
+ RMI_ABI_VERSION_GET_MAJOR(RMI_ABI_VERSION_VAL),
+ RMI_ABI_VERSION_GET_MINOR(RMI_ABI_VERSION_VAL));
+
+ return host_cmp_result();
+}
+
+/*
+ * Multi CPU version check function in parallel.
+ */
+/*
+ * Multi CPU version check function in parallel: power on every secondary
+ * CPU to run the version query, run it on the lead CPU too, then wait for
+ * all secondaries to power off. Skipped when FEAT_RME is not implemented.
+ */
+test_result_t host_realm_version_multi_cpu(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node;
+ long long ret;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ host_rmi_init_cmp_result();
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)host_realm_multi_cpu_payload_test, 0);
+
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+
+ }
+
+ ret = host_realm_multi_cpu_payload_test();
+
+ /* Wait for all secondaries to finish and power themselves off */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF) {
+ continue;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Delegate and Undelegate Non Secure Granule
+ */
+/*
+ * Delegate and Undelegate Non Secure Granule: round-trip one granule
+ * through delegate then undelegate and verify both calls succeed.
+ * Skipped when FEAT_RME is not implemented.
+ */
+test_result_t host_realm_delegate_undelegate(void)
+{
+ u_register_t retrmm;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ host_rmi_init_cmp_result();
+
+ retrmm = host_rmi_granule_delegate((u_register_t)bufferdelegate);
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Delegate operation returns 0x%lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ retrmm = host_rmi_granule_undelegate((u_register_t)bufferdelegate);
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Undelegate operation returns 0x%lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ tftf_testcase_printf("Delegate and undelegate of buffer 0x%lx succeeded\n",
+ (uintptr_t)bufferdelegate);
+
+ return host_cmp_result();
+}
+
+/*
+ * Per-CPU payload for the multi-CPU version test: query and print the RMI
+ * ABI version from the calling CPU.
+ */
+static test_result_t host_realm_multi_cpu_payload_test(void)
+{
+ u_register_t retrmm = 0U;
+
+ host_rmi_init_cmp_result();
+
+ retrmm = host_rmi_version(RMI_ABI_VERSION_VAL);
+
+ tftf_testcase_printf("Multi CPU RMM version on CPU %llx is: %lu.%lu\n",
+ (long long)read_mpidr_el1() & MPID_MASK, RMI_ABI_VERSION_GET_MAJOR(retrmm),
+ RMI_ABI_VERSION_GET_MINOR(retrmm));
+
+ return host_cmp_result();
+}
+
+/*
+ * Select all CPU's to randomly delegate/undelegate
+ * granule pages to stress the delegate mechanism
+ */
+/*
+ * Select all CPU's to randomly delegate/undelegate
+ * granule pages to stress the delegate mechanism: randomize initial
+ * granule states, run the per-CPU toggle payload on every secondary,
+ * wait for them to power off, then undelegate any remaining delegated
+ * granules. Skipped when FEAT_RME is not implemented.
+ */
+test_result_t host_realm_delundel_multi_cpu(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node;
+ long long ret;
+ u_register_t retrmm;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ host_rmi_init_cmp_result();
+
+ if (host_init_buffer_del() == TEST_RESULT_FAIL) {
+ return TEST_RESULT_FAIL;
+ }
+
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)host_realm_multi_cpu_payload_del_undel, 0);
+
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+
+ }
+
+ /* Wait for all secondaries to finish and power themselves off */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF) {
+ continue;
+ }
+ }
+
+ /*
+ * Cleanup to set all granules back to undelegated
+ */
+ for (uint32_t i = 0; i < (NUM_GRANULES * PLATFORM_CORE_COUNT) ; i++) {
+ if (bufferstate[i] == B_DELEGATED) {
+ retrmm = host_rmi_granule_undelegate(
+ (u_register_t)&bufferdelegate[i * GRANULE_SIZE]);
+ bufferstate[i] = B_UNDELEGATED;
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Delegate operation returns fail, %lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ }
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * Multi CPU testing of delegate and undelegate of granules
+ * The granules are first randomly initialized to either realm or non secure
+ * using the function init_buffer_del and then the function below
+ * assigns NUM_GRANULES to each CPU for delegation or undelgation
+ * depending upon the initial state
+ */
+/*
+ * Multi CPU testing of delegate and undelegate of granules
+ * The granules are first randomly initialized to either realm or non secure
+ * using the function init_buffer_del and then the function below
+ * assigns NUM_GRANULES to each CPU for delegation or undelgation
+ * depending upon the initial state, toggling each granule's state.
+ */
+static test_result_t host_realm_multi_cpu_payload_del_undel(void)
+{
+ u_register_t retrmm;
+ unsigned int cpu_node;
+
+ cpu_node = platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+ host_rmi_init_cmp_result();
+
+ for (uint32_t i = 0; i < NUM_GRANULES; i++) {
+ if (bufferstate[((cpu_node * NUM_GRANULES) + i)] == B_UNDELEGATED) {
+ retrmm = host_rmi_granule_delegate((u_register_t)
+ &bufferdelegate[((cpu_node * NUM_GRANULES) + i) * GRANULE_SIZE]);
+ bufferstate[((cpu_node * NUM_GRANULES) + i)] = B_DELEGATED;
+ } else {
+ retrmm = host_rmi_granule_undelegate((u_register_t)
+ &bufferdelegate[((cpu_node * NUM_GRANULES) + i) * GRANULE_SIZE]);
+ bufferstate[((cpu_node * NUM_GRANULES) + i)] = B_UNDELEGATED;
+ }
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Delegate operation returns 0x%lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * Fail testing of delegation process. The first is an error expected
+ * for processing the same granule twice and the second is submission of
+ * a misaligned address
+ */
+/*
+ * Fail testing of delegation process. The first is an error expected
+ * for processing the same granule twice and the second is submission of
+ * a misaligned address; finally the granule is undelegated as cleanup.
+ * Skipped when FEAT_RME is not implemented.
+ */
+test_result_t host_realm_fail_del(void)
+{
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ u_register_t retrmm;
+
+ host_rmi_init_cmp_result();
+
+ retrmm = host_rmi_granule_delegate((u_register_t)&bufferdelegate[0]);
+ if (retrmm != 0UL) {
+ tftf_testcase_printf
+ ("Delegate operation does not pass as expected for double delegation, %lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Second delegation of the same granule must be rejected */
+ retrmm = host_rmi_granule_delegate((u_register_t)&bufferdelegate[0]);
+ if (retrmm == 0UL) {
+ tftf_testcase_printf
+ ("Delegate operation does not fail as expected for double delegation, %lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* A non-granule-aligned address must be rejected */
+ retrmm = host_rmi_granule_undelegate((u_register_t)&bufferdelegate[1]);
+ if (retrmm == 0UL) {
+ tftf_testcase_printf
+ ("Delegate operation does not return fail for misaligned address, %lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ retrmm = host_rmi_granule_undelegate((u_register_t)&bufferdelegate[0]);
+
+ if (retrmm != 0UL) {
+ tftf_testcase_printf
+ ("Delegate operation returns fail for cleanup, %lx\n", retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+}
diff --git a/tftf/tests/runtime_services/host_realm_managment/rmi_spm_tests.c b/tftf/tests/runtime_services/host_realm_managment/rmi_spm_tests.c
new file mode 100644
index 000000000..d7a8157b7
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/rmi_spm_tests.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <arch_helpers.h>
+#include <cactus_test_cmds.h>
+#include <debug.h>
+#include <ffa_endpoints.h>
+#include <ffa_svc.h>
+#include <host_realm_helper.h>
+#include <lib/events.h>
+#include <lib/power_management.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include "rmi_spm_tests.h"
+#include <spm_test_helpers.h>
+#include <smccc.h>
+#include <test_helpers.h>
+
+static test_result_t realm_multi_cpu_payload_del_undel(void);
+
+#define ECHO_VAL1 U(0xa0a0a0a0)
+#define ECHO_VAL2 U(0xb0b0b0b0)
+#define ECHO_VAL3 U(0xc0c0c0c0)
+#define MAX_REPEATED_TEST 3
+
+/* Buffer to delegate and undelegate */
+static char bufferdelegate[NUM_GRANULES * GRANULE_SIZE * PLATFORM_CORE_COUNT]
+ __aligned(GRANULE_SIZE);
+static char bufferstate[NUM_GRANULES * PLATFORM_CORE_COUNT];
+static int cpu_test_spm_rmi[PLATFORM_CORE_COUNT];
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+static unsigned int lead_mpid;
+/*
+ * The following test conducts SPM (direct messaging) tests on a subset of
+ * selected CPUs while simultaneously performing another set of RMI
+ * (delegation) tests on the remaining CPUs, up to the full platform count.
+ * Once that test completes, the same test is run again with a different
+ * assignment of which CPU does SPM versus RMI.
+ */
+
+/*
+ * Function that randomizes the CPU assignment of tests, SPM or RMI.
+ *
+ * Fills cpu_test_spm_rmi[] with 1 for CPUs assigned the SPM test and 0 for
+ * CPUs assigned the RMI test. NUM_CPU_DED_SPM entries are drawn at random
+ * from indices [1, PLATFORM_CORE_COUNT), i.e. index 0 (the lead CPU slot)
+ * is never assigned to SPM.
+ * NOTE(review): if NUM_CPU_DED_SPM > PLATFORM_CORE_COUNT - 1 the selection
+ * loop below cannot terminate — presumably the build configuration
+ * guarantees this never happens; confirm against the platform config.
+ */
+static void rand_cpu_spm_rmi(void)
+{
+	int fentry;
+	int seln = 0;
+	/* Mark every CPU as unassigned. */
+	for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+		cpu_test_spm_rmi[i] = -1;
+	}
+	/* Randomly pick NUM_CPU_DED_SPM distinct non-lead CPUs for SPM. */
+	for (int i = 0; i < NUM_CPU_DED_SPM; i++) {
+		fentry = 0;
+		while (fentry == 0) {
+#if (PLATFORM_CORE_COUNT > 1)
+			seln = (rand() % (PLATFORM_CORE_COUNT - 1)) + 1;
+#endif
+			/* Retry until an unassigned CPU is drawn. */
+			if (cpu_test_spm_rmi[seln] == -1) {
+				cpu_test_spm_rmi[seln] = 1;
+				fentry = 1;
+			}
+		}
+	}
+	/* All remaining CPUs run the RMI test. */
+	for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+		if (cpu_test_spm_rmi[i] == -1) {
+			cpu_test_spm_rmi[i] = 0;
+		}
+	}
+}
+
+/*
+ * Return the test type (1 = SPM, 0 = RMI) assigned to the CPU identified
+ * by the given MPIDR.
+ */
+static int spm_rmi_test(unsigned int mpidr)
+{
+	unsigned int pos = platform_get_core_pos(mpidr);
+
+	return cpu_test_spm_rmi[pos];
+}
+
+/*
+ * RMI function to randomize the initial state of granules allocated for the
+ * test. A random subset is delegated, the rest is left undelegated.
+ *
+ * Fix: bufferstate[] is only switched to B_DELEGATED after the RMI call
+ * succeeded. The original marked the granule delegated before checking the
+ * return code, so a failed delegation left the bookkeeping out of sync with
+ * the real granule state (and a later reset would try to undelegate a
+ * granule that was never delegated).
+ */
+static test_result_t init_buffer_del_spm_rmi(void)
+{
+	u_register_t retrmm;
+
+	for (int i = 0; i < (NUM_GRANULES * PLATFORM_CORE_COUNT) ; i++) {
+		/* Every granule starts undelegated until proven otherwise. */
+		bufferstate[i] = B_UNDELEGATED;
+		if ((rand() % 2) == 0) {
+			retrmm = host_rmi_granule_delegate(
+				(u_register_t)&bufferdelegate[i * GRANULE_SIZE]);
+			if (retrmm != 0UL) {
+				tftf_testcase_printf("Delegate operation returns 0x%lx\n",
+						retrmm);
+				return TEST_RESULT_FAIL;
+			}
+			bufferstate[i] = B_DELEGATED;
+		}
+	}
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Return every granule that is currently delegated back to the non-secure
+ * state and update the bookkeeping accordingly.
+ */
+static test_result_t reset_buffer_del_spm_rmi(void)
+{
+	for (uint32_t idx = 0U; idx < (NUM_GRANULES * PLATFORM_CORE_COUNT) ; idx++) {
+		u_register_t res;
+
+		/* Skip granules that are already non-secure. */
+		if (bufferstate[idx] != B_DELEGATED) {
+			continue;
+		}
+		res = host_rmi_granule_undelegate(
+			(u_register_t)&bufferdelegate[idx * GRANULE_SIZE]);
+		if (res != 0UL) {
+			ERROR("Undelegate operation returns fail, %lx\n",
+				res);
+			return TEST_RESULT_FAIL;
+		}
+		bufferstate[idx] = B_UNDELEGATED;
+	}
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Barrier-then-run helper: each CPU reaching this function signals its own
+ * "booted" event to all other CPUs, then waits for the corresponding event
+ * from every other secondary CPU (itself and the lead CPU are skipped).
+ * Once all secondaries have checked in, the callback runs on all of them
+ * at approximately the same time.
+ */
+static test_result_t wait_then_call(test_result_t (*callback)(void))
+{
+	unsigned int mpidr, this_mpidr = read_mpidr_el1() & MPID_MASK;
+	unsigned int cpu_node, core_pos;
+	unsigned int this_core_pos = platform_get_core_pos(this_mpidr);
+
+	/* Announce that this CPU is ready. */
+	tftf_send_event_to_all(&cpu_booted[this_core_pos]);
+	for_each_cpu(cpu_node) {
+		mpidr = tftf_get_mpidr_from_node(cpu_node);
+		/* Ignore myself and the lead core */
+		if (mpidr == this_mpidr || mpidr == lead_mpid) {
+			continue;
+		}
+		core_pos = platform_get_core_pos(mpidr);
+		/* Block until that secondary CPU has signalled readiness. */
+		tftf_wait_for_event(&cpu_booted[core_pos]);
+	}
+	/* All cores reach this call in approximately "same" time */
+	return (*callback)();
+}
+
+/*
+ * Power on the given CPU with the provided entry point.
+ * Returns TEST_RESULT_SUCCESS when PSCI reports success, otherwise logs
+ * the PSCI error code and returns TEST_RESULT_FAIL.
+ */
+static test_result_t run_on_cpu(unsigned int mpidr, uintptr_t cpu_on_handler)
+{
+	int32_t rc = tftf_cpu_on(mpidr, cpu_on_handler, 0U);
+
+	if (rc == PSCI_E_SUCCESS) {
+		return TEST_RESULT_SUCCESS;
+	}
+
+	ERROR("tftf_cpu_on mpidr 0x%x returns %d\n", mpidr, rc);
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * SPM functions for the direct messaging
+ */
+static const struct ffa_uuid expected_sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
+ };
+
+/*
+ * Send a cactus echo command from 'sender' to 'dest' carrying 'value' and
+ * verify that the SP answers with a direct response echoing the same value.
+ */
+static test_result_t send_cactus_echo_cmd(ffa_id_t sender,
+					  ffa_id_t dest,
+					  uint64_t value)
+{
+	struct ffa_value ret = cactus_echo_send_cmd(sender, dest, value);
+
+	/*
+	 * Return responses may be FFA_MSG_SEND_DIRECT_RESP or FFA_INTERRUPT,
+	 * but only expect the former. Expect SMC32 convention from SP.
+	 */
+	if (!is_ffa_direct_response(ret)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if ((cactus_get_response(ret) != CACTUS_SUCCESS) ||
+	    (cactus_echo_get_val(ret) != value)) {
+		ERROR("Echo Failed!\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Handler that is passed during tftf_cpu_on to individual CPU cores.
+ * Runs on the powered-on core and exercises FF-A direct messaging against
+ * SP1 (MP), SP2 (MP) and SP3 (UP), expecting each echo value to be
+ * returned. Succeeds only when all three exchanges succeed.
+ */
+static test_result_t run_spm_direct_message(void)
+{
+	unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(mpid);
+	test_result_t ret = TEST_RESULT_SUCCESS;
+	struct ffa_value ffa_ret;
+
+	/*
+	 * Send a direct message request to SP1 (MP SP) from current physical
+	 * CPU. Notice SP1 ECs are already woken as a result of the PSCI_CPU_ON
+	 * invocation so they already reached the message loop.
+	 * The SPMC uses the MP pinned context corresponding to the physical
+	 * CPU emitting the request.
+	 */
+	ret = send_cactus_echo_cmd(HYP_ID, SP_ID(1), ECHO_VAL1);
+	if (ret != TEST_RESULT_SUCCESS) {
+		goto out;
+	}
+
+	/*
+	 * Secure Partitions beyond the first SP only have their first
+	 * EC (or vCPU0) woken up at boot time by the SPMC.
+	 * Other ECs need one round of ffa_run to reach the message loop.
+	 */
+	ffa_ret = ffa_run(SP_ID(2), core_pos);
+	if (ffa_func_id(ffa_ret) != FFA_MSG_WAIT) {
+		ERROR("Failed to run SP%x on core %u\n", SP_ID(2),
+		      core_pos);
+		ret = TEST_RESULT_FAIL;
+		goto out;
+	}
+
+	/*
+	 * Send a direct message request to SP2 (MP SP) from current physical
+	 * CPU. The SPMC uses the MP pinned context corresponding to the
+	 * physical CPU emitting the request.
+	 */
+	ret = send_cactus_echo_cmd(HYP_ID, SP_ID(2), ECHO_VAL2);
+	if (ret != TEST_RESULT_SUCCESS) {
+		goto out;
+	}
+
+	/*
+	 * Send a direct message request to SP3 (UP SP) from current physical CPU.
+	 * The SPMC uses the single vCPU migrated to the new physical core.
+	 * The single SP vCPU may receive requests from multiple physical CPUs.
+	 * Thus it is possible one message is being processed on one core while
+	 * another (or multiple) cores attempt sending a new direct message
+	 * request. In such case the cores attempting the new request receive
+	 * a busy response from the SPMC. To handle this case a retry loop is
+	 * implemented permitting some fairness.
+	 */
+	uint32_t trial_loop = 5U;
+	while (trial_loop--) {
+		ffa_ret = cactus_echo_send_cmd(HYP_ID, SP_ID(3), ECHO_VAL3);
+		/* SP3 busy serving another core: back off briefly and retry. */
+		if ((ffa_func_id(ffa_ret) == FFA_ERROR) &&
+		    (ffa_error_code(ffa_ret) == FFA_ERROR_BUSY)) {
+			VERBOSE("%s(%u) trial %u\n", __func__,
+				core_pos, trial_loop);
+			waitms(1);
+			continue;
+		}
+
+		if (is_ffa_direct_response(ffa_ret) == true) {
+			if (cactus_get_response(ffa_ret) != CACTUS_SUCCESS ||
+			    cactus_echo_get_val(ffa_ret) != ECHO_VAL3) {
+				ERROR("Echo Failed!\n");
+				ret = TEST_RESULT_FAIL;
+			}
+
+			goto out;
+		}
+	}
+
+	/* All retries exhausted (or unexpected response): fail. */
+	ret = TEST_RESULT_FAIL;
+
+out:
+	return ret;
+}
+
+/*
+ * Secondary core entry point: run the SPM direct messaging test and, if it
+ * passes, run the realm delegate/undelegate test on the same core.
+ *
+ * Braces added to the single-statement 'if' for consistency with the TF-A
+ * coding style used by every other function in this file.
+ */
+static test_result_t non_secure_call_secure_and_realm(void)
+{
+	test_result_t result = run_spm_direct_message();
+
+	if (result != TEST_RESULT_SUCCESS) {
+		return result;
+	}
+	return realm_multi_cpu_payload_del_undel();
+}
+
+/*
+ * Synchronize with all other participating cores, then run the SPM direct
+ * messaging test in parallel with them.
+ */
+static test_result_t non_secure_call_secure_multi_cpu_sync(void)
+{
+	return wait_then_call(&run_spm_direct_message);
+}
+
+/*
+ * Multi CPU testing of delegate and undelegate of granules.
+ * The granules were randomly initialized to realm or non-secure by
+ * init_buffer_del_spm_rmi(); each CPU then flips the state of its own
+ * NUM_GRANULES granules (delegate if undelegated and vice versa).
+ *
+ * Fixes over the original version:
+ * - bufferstate[] is only updated after the RMI call succeeded, so a failed
+ *   call can no longer leave the bookkeeping out of sync with the granule;
+ * - the failure message names the operation that actually failed (the
+ *   original always printed "Delegate", even for undelegate failures).
+ */
+static test_result_t realm_multi_cpu_payload_del_undel(void)
+{
+	u_register_t retrmm;
+	unsigned int cpu_node;
+
+	cpu_node = platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+	for (int i = 0; i < NUM_GRANULES; i++) {
+		unsigned int idx = (cpu_node * NUM_GRANULES) + i;
+		int delegate = (bufferstate[idx] == B_UNDELEGATED);
+
+		if (delegate != 0) {
+			retrmm = host_rmi_granule_delegate((u_register_t)
+					&bufferdelegate[idx * GRANULE_SIZE]);
+		} else {
+			retrmm = host_rmi_granule_undelegate((u_register_t)
+					&bufferdelegate[idx * GRANULE_SIZE]);
+		}
+		if (retrmm != 0UL) {
+			tftf_testcase_printf("%s operation returns fail, %lx\n",
+				(delegate != 0) ? "Delegate" : "Undelegate",
+				retrmm);
+			return TEST_RESULT_FAIL;
+		}
+		bufferstate[idx] = (delegate != 0) ? B_DELEGATED : B_UNDELEGATED;
+	}
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Synchronize with all other participating cores, then run the realm
+ * delegate/undelegate test in parallel with them.
+ */
+static test_result_t non_secure_call_realm_multi_cpu_sync(void)
+{
+	return wait_then_call(&realm_multi_cpu_payload_del_undel);
+}
+
+/*
+ * NS world communicates with the Secure and Realm worlds in series, via SMC,
+ * from a single secondary core. The secondary is power-cycled between each
+ * of the MAX_REPEATED_TEST iterations, each of which runs the SPM direct
+ * message test followed by the realm delegate/undelegate test.
+ */
+test_result_t test_spm_rmm_serial_smc(void)
+{
+	if (get_armv9_2_feat_rme_support() == 0U) {
+		return TEST_RESULT_SKIPPED;
+	}
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int mpidr;
+
+	/**********************************************************************
+	 * Check SPMC has ffa_version and expected FFA endpoints are deployed.
+	 **********************************************************************/
+	CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+
+	host_rmi_init_cmp_result();
+
+	/*
+	 * Randomize the initial state of the RMI granules to realm or non-secure
+	 */
+	if (init_buffer_del_spm_rmi() == TEST_RESULT_FAIL) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Preparation step:
+	 * Find another CPU than the lead CPU and power it on.
+	 */
+	mpidr = tftf_find_any_cpu_other_than(lead_mpid);
+	assert(mpidr != INVALID_MPID);
+
+	/*
+	 * Run SPM direct message call and RMI call in serial on a second core.
+	 * wait for core power cycle between each call.
+	 */
+	for (size_t i = 0; i < MAX_REPEATED_TEST; i++) {
+		/* SPM FF-A direct message call */
+		if (TEST_RESULT_SUCCESS != run_on_cpu(mpidr,
+			(uintptr_t)non_secure_call_secure_and_realm)) {
+			return TEST_RESULT_FAIL;
+		}
+		/* Wait for the target CPU to finish the test execution */
+		wait_for_core_to_turn_off(mpidr);
+	}
+
+	/* Undo any delegations left over from the random initial state. */
+	if (reset_buffer_del_spm_rmi() != TEST_RESULT_SUCCESS) {
+		return TEST_RESULT_FAIL;
+	}
+
+	VERBOSE("Done exiting.\n");
+
+	/**********************************************************************
+	 * Report register comparison result
+	 **********************************************************************/
+	return host_cmp_result();
+}
+
+/*
+ * Test function to let NS world communicate with S and RL worlds in parallel
+ * via SMC using multiple cores.
+ *
+ * Fixes over the original version:
+ * - the inner event-init loop no longer shadows the outer iteration
+ *   variable 'i';
+ * - the VERBOSE format string no longer uses a backslash line continuation,
+ *   which embedded the source indentation into the logged message.
+ */
+test_result_t test_spm_rmm_parallel_smc(void)
+{
+	if (get_armv9_2_feat_rme_support() == 0U) {
+		return TEST_RESULT_SKIPPED;
+	}
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int cpu_node, mpidr;
+
+	/**********************************************************************
+	 * Check SPMC has ffa_version and expected FFA endpoints are deployed.
+	 **********************************************************************/
+	CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+
+	host_rmi_init_cmp_result();
+
+	/*
+	 * Randomize the initial state of the RMI granules to realm or non-secure
+	 */
+	if (init_buffer_del_spm_rmi() == TEST_RESULT_FAIL) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Main test to run both SPM and RMM or TRP together in parallel
+	 */
+	for (int i = 0; i < MAX_REPEATED_TEST; i++) {
+		VERBOSE("Main test(%d) to run both SPM and RMM or "
+			"TRP together in parallel...\n", i);
+
+		/* Reinitialize all CPUs event */
+		for (unsigned int j = 0U; j < PLATFORM_CORE_COUNT; j++) {
+			tftf_init_event(&cpu_booted[j]);
+		}
+
+		/*
+		 * Randomise the assignment of the CPUs to either SPM or RMI
+		 */
+		rand_cpu_spm_rmi();
+
+		/*
+		 * For each CPU, run the test function (SPM or RMI) it was
+		 * randomly assigned.
+		 */
+		for_each_cpu(cpu_node) {
+			mpidr = tftf_get_mpidr_from_node(cpu_node);
+			if (mpidr == lead_mpid) {
+				continue;
+			}
+			if (spm_rmi_test(mpidr) == 1) {
+				if (TEST_RESULT_SUCCESS != run_on_cpu(mpidr,
+					(uintptr_t)non_secure_call_secure_multi_cpu_sync)) {
+					return TEST_RESULT_FAIL;
+				}
+			} else {
+				if (TEST_RESULT_SUCCESS != run_on_cpu(mpidr,
+					(uintptr_t)non_secure_call_realm_multi_cpu_sync)) {
+					return TEST_RESULT_FAIL;
+				}
+			}
+		}
+
+		VERBOSE("Waiting for secondary CPUs to turn off ...\n");
+		wait_for_non_lead_cpus();
+	}
+
+	VERBOSE("Done exiting.\n");
+
+	if (reset_buffer_del_spm_rmi() != TEST_RESULT_SUCCESS) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/**********************************************************************
+	 * Report register comparison result
+	 **********************************************************************/
+	return host_cmp_result();
+}
diff --git a/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c b/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c
new file mode 100644
index 000000000..d308784c5
--- /dev/null
+++ b/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c
@@ -0,0 +1,546 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+#include <drivers/arm/arm_gic.h>
+#include <debug.h>
+#include <platform.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <test_helpers.h>
+
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_pmu.h>
+#include <host_shared_data.h>
+
+static uint64_t is_secondary_cpu_on;
+static struct realm realm;
+
+/*
+ * Test tries to create the maximum number of RECs.
+ * All RECs are entered sequentially from a single CPU; each REC runs the
+ * sleep command and is expected to exit with a host call.
+ *
+ * Fix: ret1 is initialized so it cannot be read uninitialized should the
+ * loop body never execute.
+ */
+test_result_t host_realm_multi_rec_single_cpu(void)
+{
+	bool ret1 = true;
+	bool ret2;
+	u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+	RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE};
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	/* Create and activate a realm with MAX_REC_COUNT RECs. */
+	if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+			(u_register_t)PAGE_POOL_BASE,
+			(u_register_t)PAGE_POOL_MAX_SIZE,
+			0UL, rec_flag, MAX_REC_COUNT)) {
+		return TEST_RESULT_FAIL;
+	}
+	if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+			NS_REALM_SHARED_MEM_SIZE)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Enter every REC once; stop at the first failure. */
+	for (unsigned int i = 0; i < MAX_REC_COUNT; i++) {
+		host_shared_data_set_host_val(&realm, i, HOST_ARG1_INDEX, 10U);
+		ret1 = host_enter_realm_execute(&realm, REALM_SLEEP_CMD,
+				RMI_EXIT_HOST_CALL, i);
+		if (!ret1) {
+			break;
+		}
+	}
+
+	ret2 = host_destroy_realm(&realm);
+
+	if (!ret1 || !ret2) {
+		ERROR("%s(): enter=%d destroy=%d\n",
+			__func__, ret1, ret2);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test creates 3 RECs:
+ * - Rec0 requests CPU ON for rec 1; host denies it.
+ * - Host tries to enter rec 1 and expects failure.
+ * - Host re-enters rec 0, which checks the CPU ON was denied.
+ * - Rec0 requests CPU ON for rec 2; host tries to deny it, which must fail
+ *   since rec 2 is runnable. Host then allows CPU ON and re-enters rec 0,
+ *   which checks the return is "already on".
+ *
+ * Fixes over the original version:
+ * - the invalid-mpidr branch after PSCI_AFFINITY_INFO now sets ret1 = false
+ *   before bailing out, so that path can no longer let the test pass;
+ * - the two REC0 re-entries check the RMI return code before inspecting
+ *   run->exit.gprs, which would otherwise be stale on failure.
+ */
+test_result_t host_realm_multi_rec_psci_denied(void)
+{
+	bool ret1, ret2;
+	u_register_t ret;
+	unsigned int host_call_result;
+	u_register_t exit_reason;
+	unsigned int rec_num;
+	struct rmi_rec_run *run;
+	/* Create 3 rec Rec 0 and 2 are runnable, Rec 1 in not runnable */
+	u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_NOT_RUNNABLE, RMI_RUNNABLE};
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+			(u_register_t)PAGE_POOL_BASE,
+			(u_register_t)PAGE_POOL_MAX_SIZE,
+			0UL, rec_flag, 3U)) {
+		return TEST_RESULT_FAIL;
+	}
+	if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+			NS_REALM_SHARED_MEM_SIZE)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	ret1 = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_PSCI_DENIED_CMD,
+			RMI_EXIT_PSCI, 0U);
+	run = (struct rmi_rec_run *)realm.run[0];
+
+	if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+		ERROR("Host did not receive CPU ON request\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+	rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+	if (rec_num != 1U) {
+		ERROR("Invalid mpidr requested\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+	INFO("Requesting PSCI Complete Status Denied REC %d\n", rec_num);
+	ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+			(unsigned long)PSCI_E_DENIED);
+	if (ret != RMI_SUCCESS) {
+		ERROR("host_rmi_psci_complete failed\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+
+	/* Enter rec1, should fail */
+	ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 1U);
+	if (ret == RMI_SUCCESS) {
+		ERROR("Rec1 enter should have failed\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+	/* Re-enter REC0; it should now issue PSCI_AFFINITY_INFO for rec 1. */
+	ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+	if (ret != RMI_SUCCESS) {
+		ERROR("Rec0 re-enter failed\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+
+	if (run->exit.gprs[0] != SMC_PSCI_AFFINITY_INFO_AARCH64) {
+		ERROR("Host did not receive PSCI_AFFINITY_INFO request\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+	rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+	if (rec_num != 1U) {
+		ERROR("Invalid mpidr requested\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+
+	INFO("Requesting PSCI Complete Affinity Info REC %d\n", rec_num);
+	ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+			(unsigned long)PSCI_E_SUCCESS);
+	if (ret != RMI_SUCCESS) {
+		ERROR("host_rmi_psci_complete failed\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+
+	/* Re-enter REC0 complete PSCI_AFFINITY_INFO */
+	ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+	if (ret != RMI_SUCCESS) {
+		ERROR("Rec0 re-enter failed\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+
+	if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+		ERROR("Host did not receive CPU ON request\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+	rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+	if (rec_num != 2U) {
+		ERROR("Invalid mpidr requested\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+
+	INFO("Requesting PSCI Complete Status Denied REC %d\n", rec_num);
+	/* PSCI_DENIED should fail as rec2 is RMI_RUNNABLE */
+	ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+			(unsigned long)PSCI_E_DENIED);
+	if (ret == RMI_SUCCESS) {
+		ret1 = false;
+		ERROR("host_rmi_psci_complete should have failed\n");
+		goto destroy_realm;
+	}
+
+	ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+	if (ret != RMI_SUCCESS) {
+		ERROR("Rec0 re-enter failed\n");
+		ret1 = false;
+		goto destroy_realm;
+	}
+
+destroy_realm:
+	ret2 = host_destroy_realm(&realm);
+
+	if (!ret1 || !ret2) {
+		ERROR("%s(): enter=%d destroy=%d\n",
+			__func__, ret1, ret2);
+		return TEST_RESULT_FAIL;
+	}
+
+	return host_cmp_result();
+}
+
+/* Lock used to avoid concurrent accesses to the secondary_cpu_on counter */
+spinlock_t secondary_cpu_lock;
+
+/*
+ * Secondary CPU entry point: atomically take the next REC index and enter
+ * the realm with it, expecting the REC to exit on an IRQ.
+ *
+ * Fix: the counter value is captured while the lock is held. The original
+ * read is_secondary_cpu_on again after spin_unlock(), so two CPUs could
+ * race and enter the realm with the same REC index.
+ */
+static test_result_t cpu_on_handler2(void)
+{
+	bool ret;
+	uint64_t rec_idx;
+
+	spin_lock(&secondary_cpu_lock);
+	rec_idx = ++is_secondary_cpu_on;
+	spin_unlock(&secondary_cpu_lock);
+
+	ret = host_enter_realm_execute(&realm, REALM_LOOP_CMD,
+			RMI_EXIT_IRQ, rec_idx);
+	if (!ret) {
+		return TEST_RESULT_FAIL;
+	}
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Create a realm with MAX_REC_COUNT RECs, enter REC0 once from the lead
+ * CPU, then power on all secondary CPUs, each of which enters one of the
+ * remaining RECs running a busy loop. Once all CPUs are up, an NS SGI is
+ * injected for every secondary REC so each exits with RMI_EXIT_IRQ.
+ *
+ * Fix: a tftf_try_cpu_on() failure is now recorded and fails the test.
+ * Previously the early 'goto destroy_realm' left ret1 (set by the earlier
+ * realm entry) possibly true, so the test could pass despite CPUs failing
+ * to power on.
+ */
+test_result_t host_realm_multi_rec_exit_irq(void)
+{
+	bool ret1, ret2;
+	bool cpu_on_ok = true;
+	unsigned int rec_count = MAX_REC_COUNT;
+	u_register_t other_mpidr, my_mpidr, ret;
+	int cpu_node;
+	u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+		RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+		RMI_RUNNABLE};
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(rec_count);
+
+	if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+			(u_register_t)PAGE_POOL_BASE,
+			(u_register_t)PAGE_POOL_MAX_SIZE,
+			0UL, rec_flag, rec_count)) {
+		return TEST_RESULT_FAIL;
+	}
+	if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+			NS_REALM_SHARED_MEM_SIZE)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	is_secondary_cpu_on = 0U;
+	my_mpidr = read_mpidr_el1() & MPID_MASK;
+	/* Sanity-check realm entry from the lead CPU before starting others. */
+	ret1 = host_enter_realm_execute(&realm, REALM_GET_RSI_VERSION, RMI_EXIT_HOST_CALL, 0U);
+	for_each_cpu(cpu_node) {
+		other_mpidr = tftf_get_mpidr_from_node(cpu_node);
+		if (other_mpidr == my_mpidr) {
+			continue;
+		}
+		/* Power on the other CPU */
+		ret = tftf_try_cpu_on(other_mpidr, (uintptr_t)cpu_on_handler2, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			cpu_on_ok = false;
+			goto destroy_realm;
+		}
+	}
+
+	/*
+	 * NOTE(review): this wait has no timeout; it relies on every
+	 * secondary CPU eventually incrementing the counter.
+	 */
+	INFO("Wait for all CPU to come up\n");
+	while (is_secondary_cpu_on != (rec_count - 1U)) {
+		waitms(100U);
+	}
+
+destroy_realm:
+	/* Kick every secondary REC out of its loop with an NS SGI. */
+	tftf_irq_enable(IRQ_NS_SGI_7, GIC_HIGHEST_NS_PRIORITY);
+	for (unsigned int i = 1U; i < rec_count; i++) {
+		INFO("Raising NS IRQ for rec %d\n", i);
+		host_rec_send_sgi(&realm, IRQ_NS_SGI_7, i);
+	}
+	tftf_irq_disable(IRQ_NS_SGI_7);
+	ret2 = host_destroy_realm(&realm);
+	if (!ret1 || !ret2 || !cpu_on_ok) {
+		ERROR("%s(): enter=%d destroy=%d cpu_on=%d\n",
+			__func__, ret1, ret2, cpu_on_ok);
+		return TEST_RESULT_FAIL;
+	}
+
+	return host_cmp_result();
+}
+
+
+/*
+ * Secondary CPU entry point: atomically take the next REC index, enter the
+ * realm with it and expect the REC to exit with a PSCI CPU_OFF request.
+ */
+static test_result_t cpu_on_handler(void)
+{
+	unsigned int rec_idx;
+	bool entered;
+
+	spin_lock(&secondary_cpu_lock);
+	rec_idx = ++is_secondary_cpu_on;
+	spin_unlock(&secondary_cpu_lock);
+
+	entered = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+			RMI_EXIT_PSCI, rec_idx);
+	if (entered) {
+		struct rmi_rec_run *run = (struct rmi_rec_run *)realm.run[rec_idx];
+
+		if (run->exit.gprs[0] == SMC_PSCI_CPU_OFF) {
+			return TEST_RESULT_SUCCESS;
+		}
+	}
+	ERROR("Rec %d failed\n", rec_idx);
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * The test creates a realm with MAX recs.
+ * On receiving PSCI_CPU_ON calls from REC0 for all other recs,
+ * the test completes the PSCI call and re-enters REC0.
+ * Turn ON secondary CPUs up to a max of MAX_REC_COUNT.
+ * Each of the secondaries then enters the Realm with a different REC
+ * and executes the test REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD in Realm payload.
+ * It is expected that the REC will exit with PSCI_CPU_OFF as the exit reason.
+ * REC0 checks if all other CPUs are off, via PSCI_AFFINITY_INFO.
+ * Host completes the PSCI requests.
+ */
+test_result_t host_realm_multi_rec_multiple_cpu(void)
+{
+	bool ret1, ret2;
+	test_result_t ret3 = TEST_RESULT_FAIL;
+	int ret = RMI_ERROR_INPUT;
+	u_register_t rec_num;
+	u_register_t other_mpidr, my_mpidr;
+	struct rmi_rec_run *run;
+	unsigned int host_call_result, i = 0U;
+	/* Only REC0 is runnable; the rest are turned on via PSCI_CPU_ON. */
+	u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+		RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+		RMI_NOT_RUNNABLE};
+	u_register_t exit_reason;
+	int cpu_node;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(MAX_REC_COUNT);
+
+	if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+			(u_register_t)PAGE_POOL_BASE,
+			(u_register_t)PAGE_POOL_MAX_SIZE,
+			0UL, rec_flag, MAX_REC_COUNT)) {
+		return TEST_RESULT_FAIL;
+	}
+	if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+			NS_REALM_SHARED_MEM_SIZE)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	is_secondary_cpu_on = 0U;
+	init_spinlock(&secondary_cpu_lock);
+	my_mpidr = read_mpidr_el1() & MPID_MASK;
+	host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, MAX_REC_COUNT);
+	ret1 = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+			RMI_EXIT_PSCI, 0U);
+	if (!ret1) {
+		ERROR("Host did not receive CPU ON request\n");
+		goto destroy_realm;
+	}
+	/*
+	 * Service every PSCI_CPU_ON request REC0 emits, one per secondary
+	 * REC, until REC0 exits for a different reason.
+	 */
+	while (true) {
+		run = (struct rmi_rec_run *)realm.run[0];
+		if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+			ERROR("Host did not receive CPU ON request\n");
+			goto destroy_realm;
+		}
+		rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+		if (rec_num >= MAX_REC_COUNT) {
+			ERROR("Invalid mpidr requested\n");
+			goto destroy_realm;
+		}
+		ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+				(unsigned long)PSCI_E_SUCCESS);
+		if (ret == RMI_SUCCESS) {
+			/* Re-enter REC0 complete CPU_ON */
+			ret = host_realm_rec_enter(&realm, &exit_reason,
+					&host_call_result, 0U);
+			if (ret != RMI_SUCCESS || exit_reason != RMI_EXIT_PSCI) {
+				break;
+			}
+		} else {
+			ERROR("host_rmi_psci_complete failed\n");
+			goto destroy_realm;
+		}
+	}
+	/* REC0 must have finished its part of the test with a host call. */
+	if (exit_reason != RMI_EXIT_HOST_CALL || host_call_result != TEST_RESULT_SUCCESS) {
+		ERROR("Realm failed\n");
+		goto destroy_realm;
+	}
+
+	/* Turn on all CPUs */
+	for_each_cpu(cpu_node) {
+		if (i == (MAX_REC_COUNT - 1U)) {
+			break;
+		}
+		other_mpidr = tftf_get_mpidr_from_node(cpu_node);
+		if (other_mpidr == my_mpidr) {
+			continue;
+		}
+
+		/* Power on the other CPU */
+		ret = tftf_try_cpu_on(other_mpidr, (uintptr_t)cpu_on_handler, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("TFTF CPU ON failed\n");
+			goto destroy_realm;
+		}
+		i++;
+	}
+
+	/*
+	 * Service REC0's PSCI_AFFINITY_INFO polls until it stops emitting
+	 * them (i.e. until all secondary RECs have turned off).
+	 */
+	while (true) {
+		/* Re-enter REC0 complete PSCI_AFFINITY_INFO */
+		ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+		if (ret != RMI_SUCCESS) {
+			ERROR("Rec0 re-enter failed\n");
+			goto destroy_realm;
+		}
+		if (run->exit.gprs[0] != SMC_PSCI_AFFINITY_INFO_AARCH64) {
+			break;
+		}
+		rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+		if (rec_num >= MAX_REC_COUNT) {
+			ERROR("Invalid mpidr requested\n");
+			goto destroy_realm;
+		}
+		ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+				(unsigned long)PSCI_E_SUCCESS);
+
+		if (ret != RMI_SUCCESS) {
+			ERROR("host_rmi_psci_complete failed\n");
+			goto destroy_realm;
+		}
+	}
+
+	/* Overall result is REC0's final host-call result. */
+	if (ret == RMI_SUCCESS && exit_reason == RMI_EXIT_HOST_CALL) {
+		ret3 = host_call_result;
+	}
+destroy_realm:
+	ret2 = host_destroy_realm(&realm);
+
+	if ((ret != RMI_SUCCESS) || !ret2) {
+		ERROR("%s(): enter=%d destroy=%d\n",
+			__func__, ret, ret2);
+		return TEST_RESULT_FAIL;
+	}
+
+	return ret3;
+}
+
+/*
+ * Test creates 2 realms with multiple recs:
+ * - realm1 rec0 requests CPU_ON for rec2;
+ * - host calls PSCI_COMPLETE with a wrong rec (rec3), expects an error;
+ * - host calls PSCI_COMPLETE with a rec from a different realm, expects an
+ *   error;
+ * - host calls PSCI_COMPLETE with the correct rec, expects success;
+ * - host attempts to execute a rec which is NOT_RUNNABLE, expects an error.
+ *
+ * Fix: the return code of the PSCI_COMPLETE call that is expected to
+ * succeed is now checked (the original ignored it and carried on).
+ */
+test_result_t host_realm_multi_rec_multiple_cpu2(void)
+{
+	bool ret1, ret2;
+	test_result_t ret3 = TEST_RESULT_FAIL;
+	int ret = RMI_ERROR_INPUT;
+	u_register_t rec_num;
+	struct rmi_rec_run *run;
+	unsigned int host_call_result;
+	struct realm realm2;
+	u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+		RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+		RMI_NOT_RUNNABLE};
+	u_register_t exit_reason;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+			(u_register_t)PAGE_POOL_BASE,
+			(u_register_t)PAGE_POOL_MAX_SIZE,
+			0UL, rec_flag, MAX_REC_COUNT)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Second realm used only as a source of a foreign REC handle. */
+	if (!host_create_activate_realm_payload(&realm2, (u_register_t)REALM_IMAGE_BASE,
+			(u_register_t)PAGE_POOL_BASE + PAGE_POOL_MAX_SIZE,
+			(u_register_t)PAGE_POOL_MAX_SIZE,
+			0UL, rec_flag, 1U)) {
+		(void)host_destroy_realm(&realm);
+		return TEST_RESULT_FAIL;
+	}
+
+	if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+			NS_REALM_SHARED_MEM_SIZE)) {
+		goto destroy_realm;
+	}
+
+	/* Realm to request CPU_ON for rec 2 */
+	host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, 2U);
+	ret1 = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+			RMI_EXIT_PSCI, 0U);
+	if (!ret1) {
+		ERROR("Host did not receive CPU ON request\n");
+		goto destroy_realm;
+	}
+	run = (struct rmi_rec_run *)realm.run[0];
+	if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+		ERROR("Host2 did not receive CPU ON request\n");
+		goto destroy_realm;
+	}
+	rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+	if (rec_num >= MAX_REC_COUNT) {
+		ERROR("Invalid mpidr requested\n");
+		goto destroy_realm;
+	}
+
+	/* pass wrong target_rec, expect error */
+	ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num + 1U],
+			(unsigned long)PSCI_E_SUCCESS);
+	if (ret == RMI_SUCCESS) {
+		ERROR("host_rmi_psci_complete wrong target_rec didn't fail ret=%x\n",
+			ret);
+		goto destroy_realm;
+	}
+
+	/* pass wrong target_rec from different realm, expect error */
+	ret = host_rmi_psci_complete(realm.rec[0], realm2.rec[0U],
+			(unsigned long)PSCI_E_SUCCESS);
+	if (ret == RMI_SUCCESS) {
+		ERROR("host_rmi_psci_complete wrong target_rec didn't fail ret=%x\n",
+			ret);
+		goto destroy_realm;
+	}
+
+	/* Correct target_rec: this PSCI_COMPLETE must succeed. */
+	ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+			(unsigned long)PSCI_E_SUCCESS);
+	if (ret != RMI_SUCCESS) {
+		ERROR("host_rmi_psci_complete failed ret=%x\n", ret);
+		goto destroy_realm;
+	}
+
+	/* Try to run Rec3(CPU OFF/NOT_RUNNABLE), expect error */
+	ret = host_realm_rec_enter(&realm, &exit_reason,
+			&host_call_result, 3U);
+
+	if (ret == RMI_SUCCESS) {
+		ERROR("Expected error\n");
+		goto destroy_realm;
+	}
+	ret3 = TEST_RESULT_SUCCESS;
+
+destroy_realm:
+	ret1 = host_destroy_realm(&realm);
+	ret2 = host_destroy_realm(&realm2);
+
+	if (!ret1 || !ret2) {
+		ERROR("%s(): failed destroy=%d, %d\n",
+			__func__, ret1, ret2);
+		return TEST_RESULT_FAIL;
+	}
+	return ret3;
+}
diff --git a/tftf/tests/runtime_services/realm_payload/host_realm_payload_simd_tests.c b/tftf/tests/runtime_services/realm_payload/host_realm_payload_simd_tests.c
new file mode 100644
index 000000000..c19098589
--- /dev/null
+++ b/tftf/tests/runtime_services/realm_payload/host_realm_payload_simd_tests.c
@@ -0,0 +1,1365 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <assert.h>
+#include <arch_features.h>
+#include <debug.h>
+#include <test_helpers.h>
+#include <lib/extensions/fpu.h>
+#include <lib/extensions/sme.h>
+#include <lib/extensions/sve.h>
+
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_simd.h>
+#include <host_shared_data.h>
+
+#define NS_SVE_OP_ARRAYSIZE 1024U
+#define SVE_TEST_ITERATIONS 50U
+
+/* Min test iteration count for 'host_and_realm_check_simd' test */
+#define TEST_ITERATIONS_MIN (16U)
+
+/* Number of FPU configs: none */
+#define NUM_FPU_CONFIGS (0U)
+
+/* Number of SVE configs: SVE_VL, SVE hint */
+#define NUM_SVE_CONFIGS (2U)
+
+/* Number of SME configs: SVE_SVL, FEAT_FA64, Streaming mode */
+#define NUM_SME_CONFIGS (3U)
+
+#define NS_NORMAL_SVE 0x1U
+#define NS_STREAMING_SVE 0x2U
+
+typedef enum security_state {
+ NONSECURE_WORLD = 0U,
+ REALM_WORLD,
+ SECURITY_STATE_MAX
+} security_state_t;
+
+typedef enum {
+ TEST_FPU = 0U,
+ TEST_SVE,
+ TEST_SME,
+} simd_test_t;
+
+static int ns_sve_op_1[NS_SVE_OP_ARRAYSIZE];
+static int ns_sve_op_2[NS_SVE_OP_ARRAYSIZE];
+
+static sve_z_regs_t ns_sve_z_regs_write;
+static sve_z_regs_t ns_sve_z_regs_read;
+
+static sve_p_regs_t ns_sve_p_regs_write;
+static sve_p_regs_t ns_sve_p_regs_read;
+
+static sve_ffr_regs_t ns_sve_ffr_regs_write;
+static sve_ffr_regs_t ns_sve_ffr_regs_read;
+
+static fpu_q_reg_t ns_fpu_q_regs_write[FPU_Q_COUNT];
+static fpu_q_reg_t ns_fpu_q_regs_read[FPU_Q_COUNT];
+
+static fpu_cs_regs_t ns_fpu_cs_regs_write;
+static fpu_cs_regs_t ns_fpu_cs_regs_read;
+
+static struct realm realm;
+
+/* Skip test if SVE is not supported in H/W or in RMI features */
+/*
+ * NOTE: this macro expands to 'return' statements, so it may only be used
+ * inside a function returning test_result_t. The do/while(false) wrapper
+ * makes it behave as a single statement at the call site.
+ */
+#define CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(_reg0)			\
+	do {								\
+		SKIP_TEST_IF_SVE_NOT_SUPPORTED();			\
+									\
+		/* Get RMM support for SVE and its max SVE VL */	\
+		if (host_rmi_features(0UL, &_reg0) != REALM_SUCCESS) {	\
+			ERROR("Failed to get RMI feat_reg0\n");		\
+			return TEST_RESULT_FAIL;			\
+		}							\
+									\
+		/* SVE not supported in RMI features? */		\
+		if ((_reg0 & RMI_FEATURE_REGISTER_0_SVE_EN) == 0UL) {	\
+			ERROR("SVE not in RMI features, skipping\n");	\
+			return TEST_RESULT_SKIPPED;			\
+		}							\
+	} while (false)
+
+/*
+ * Create and activate a Realm payload with one runnable REC and set up the
+ * Host <-> Realm shared memory region. Uses the file-scope 'realm' instance.
+ *
+ * sve_en - request SVE support in the Realm's RMI feature flags
+ * sve_vq - SVE vector quanta to request (only meaningful when sve_en is true)
+ *
+ * Returns TEST_RESULT_SUCCESS on success, TEST_RESULT_FAIL otherwise.
+ */
+static test_result_t host_create_sve_realm_payload(bool sve_en, uint8_t sve_vq)
+{
+	u_register_t feature_flag;
+	u_register_t rec_flag[1] = {RMI_RUNNABLE};
+
+	if (sve_en) {
+		feature_flag = RMI_FEATURE_REGISTER_0_SVE_EN |
+				INPLACE(FEATURE_SVE_VL, sve_vq);
+	} else {
+		feature_flag = 0UL;
+	}
+
+	/* Initialise Realm payload */
+	if (!host_create_activate_realm_payload(&realm,
+			(u_register_t)REALM_IMAGE_BASE,
+			(u_register_t)PAGE_POOL_BASE,
+			(u_register_t)PAGE_POOL_MAX_SIZE,
+			feature_flag, rec_flag, 1U)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Create shared memory between Host and Realm */
+	if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+					NS_REALM_SHARED_MEM_SIZE)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * RMI should report SVE VL in RMI features and it must be the same value as the
+ * max SVE VL seen by the NS world.
+ */
+test_result_t host_check_rmi_reports_proper_sve_vl(void)
+{
+	u_register_t rmi_feat_reg0;
+	uint8_t rmi_sve_vq;
+	uint8_t ns_sve_vq;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+	rmi_sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+	/*
+	 * Configure NS to arch supported max VL and get the value reported
+	 * by rdvl
+	 */
+	sve_config_vq(SVE_VQ_ARCH_MAX);
+	ns_sve_vq = SVE_VL_TO_VQ(sve_rdvl_1());
+
+	/* The two views of the maximum vector length must agree */
+	if (rmi_sve_vq != ns_sve_vq) {
+		ERROR("RMI max SVE VL %u bits don't match NS max "
+		      "SVE VL %u bits\n", SVE_VQ_TO_BITS(rmi_sve_vq),
+		      SVE_VQ_TO_BITS(ns_sve_vq));
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/* Test Realm creation with SVE enabled and run command rdvl */
+/*
+ * Creates an SVE Realm at the max VL reported by RMI, asks the Realm to
+ * execute 'rdvl', and checks the Realm-reported VL matches the requested one.
+ */
+test_result_t host_sve_realm_cmd_rdvl(void)
+{
+	host_shared_data_t *sd;
+	struct sve_cmd_rdvl *rl_output;
+	uint8_t sve_vq, rl_max_sve_vq;
+	u_register_t rmi_feat_reg0;
+	test_result_t rc;
+	bool realm_rc;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+	sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+	rc = host_create_sve_realm_payload(true, sve_vq);
+	if (rc != TEST_RESULT_SUCCESS) {
+		ERROR("Failed to create Realm with SVE\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	realm_rc = host_enter_realm_execute(&realm, REALM_SVE_RDVL,
+					    RMI_EXIT_HOST_CALL, 0U);
+	if (realm_rc != true) {
+		rc = TEST_RESULT_FAIL;
+		goto rm_realm;
+	}
+
+	/* Check if rdvl matches the SVE VL created */
+	sd = host_get_shared_structure(&realm, 0U);
+	rl_output = (struct sve_cmd_rdvl *)sd->realm_cmd_output_buffer;
+	rl_max_sve_vq = SVE_VL_TO_VQ(rl_output->rdvl);
+	if (sve_vq == rl_max_sve_vq) {
+		rc = TEST_RESULT_SUCCESS;
+	} else {
+		ERROR("Realm created with max VL: %u bits, but Realm reported "
+		      "max VL as: %u bits\n", SVE_VQ_TO_BITS(sve_vq),
+		      SVE_VQ_TO_BITS(rl_max_sve_vq));
+		rc = TEST_RESULT_FAIL;
+	}
+
+rm_realm:
+	if (!host_destroy_realm(&realm)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return rc;
+}
+
+/* Test Realm creation with SVE enabled but with invalid SVE VL */
+test_result_t host_sve_realm_test_invalid_vl(void)
+{
+	u_register_t rmi_feat_reg0;
+	test_result_t rc;
+	uint8_t sve_vq;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+	sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+	/*
+	 * Pass a sve_vq that is greater than the value supported by RMM
+	 * and check whether creating Realm fails
+	 */
+	rc = host_create_sve_realm_payload(true, (sve_vq + 1));
+	if (rc == TEST_RESULT_SUCCESS) {
+		/* Realm creation unexpectedly succeeded: clean it up */
+		ERROR("Error: Realm created with invalid SVE VL %u\n", (sve_vq + 1));
+		host_destroy_realm(&realm);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Common helper: create a Realm (with or without SVE) and check that the
+ * SVE-related ID registers visible to the Realm (ID_AA64PFR0_EL1.SVE and
+ * ID_AA64ZFR0_EL1) are consistent with the requested configuration.
+ */
+static test_result_t _host_sve_realm_check_id_registers(bool sve_en)
+{
+	host_shared_data_t *sd;
+	struct sve_cmd_id_regs *r_regs;
+	u_register_t rmi_feat_reg0;
+	test_result_t rc;
+	bool realm_rc;
+	uint8_t sve_vq = 0U;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	if (sve_en) {
+		CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+		sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+	}
+
+	rc = host_create_sve_realm_payload(sve_en, sve_vq);
+	if (rc != TEST_RESULT_SUCCESS) {
+		return rc;
+	}
+
+	realm_rc = host_enter_realm_execute(&realm, REALM_SVE_ID_REGISTERS,
+					    RMI_EXIT_HOST_CALL, 0U);
+	if (!realm_rc) {
+		rc = TEST_RESULT_FAIL;
+		goto rm_realm;
+	}
+
+	/* Realm wrote the ID register values into the shared buffer */
+	sd = host_get_shared_structure(&realm, 0U);
+	r_regs = (struct sve_cmd_id_regs *)sd->realm_cmd_output_buffer;
+
+	/* Check ID register SVE flags */
+	if (sve_en) {
+		rc = TEST_RESULT_SUCCESS;
+		if (EXTRACT(ID_AA64PFR0_SVE, r_regs->id_aa64pfr0_el1) == 0UL) {
+			ERROR("ID_AA64PFR0_EL1: SVE not enabled\n");
+			rc = TEST_RESULT_FAIL;
+		}
+		if (r_regs->id_aa64zfr0_el1 == 0UL) {
+			ERROR("ID_AA64ZFR0_EL1: No SVE features present\n");
+			rc = TEST_RESULT_FAIL;
+		}
+	} else {
+		rc = TEST_RESULT_SUCCESS;
+		if (EXTRACT(ID_AA64PFR0_SVE, r_regs->id_aa64pfr0_el1) != 0UL) {
+			ERROR("ID_AA64PFR0_EL1: SVE enabled\n");
+			rc = TEST_RESULT_FAIL;
+		}
+		if (r_regs->id_aa64zfr0_el1 != 0UL) {
+			ERROR("ID_AA64ZFR0_EL1: Realm reported non-zero value\n");
+			rc = TEST_RESULT_FAIL;
+		}
+	}
+
+rm_realm:
+	host_destroy_realm(&realm);
+	return rc;
+}
+
+/* Test ID_AA64PFR0_EL1, ID_AA64ZFR0_EL1_SVE values in SVE Realm */
+test_result_t host_sve_realm_cmd_id_registers(void)
+{
+	/* SVE-enabled variant of the common ID register check */
+	return _host_sve_realm_check_id_registers(true);
+}
+
+/* Test ID_AA64PFR0_EL1, ID_AA64ZFR0_EL1_SVE values in non SVE Realm */
+test_result_t host_non_sve_realm_cmd_id_registers(void)
+{
+	/* SVE-disabled variant of the common ID register check */
+	return _host_sve_realm_check_id_registers(false);
+}
+
+/* Print, one per line, every SVE vector length (in bits) set in vl_bitmap */
+static void print_sve_vl_bitmap(uint32_t vl_bitmap)
+{
+	uint8_t vq = 0U;
+
+	while (vq <= SVE_VQ_ARCH_MAX) {
+		if ((vl_bitmap & BIT_32(vq)) != 0U) {
+			INFO("\t%u\n", SVE_VQ_TO_BITS(vq));
+		}
+		vq++;
+	}
+}
+
+/* Create SVE Realm and probe all the supported VLs */
+test_result_t host_sve_realm_cmd_probe_vl(void)
+{
+	host_shared_data_t *sd;
+	struct sve_cmd_probe_vl *rl_output;
+	uint32_t vl_bitmap_expected;
+	u_register_t rmi_feat_reg0;
+	test_result_t rc;
+	bool realm_rc;
+	uint8_t sve_vq;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+	sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+	rc = host_create_sve_realm_payload(true, sve_vq);
+	if (rc != TEST_RESULT_SUCCESS) {
+		return rc;
+	}
+
+	/*
+	 * Configure TFTF with sve_vq and probe all VLs and compare it with
+	 * the bitmap returned from Realm
+	 */
+	vl_bitmap_expected = sve_probe_vl(sve_vq);
+
+	realm_rc = host_enter_realm_execute(&realm, REALM_SVE_PROBE_VL,
+					    RMI_EXIT_HOST_CALL, 0U);
+	if (!realm_rc) {
+		rc = TEST_RESULT_FAIL;
+		goto rm_realm;
+	}
+
+	/* Realm wrote its probed VL bitmap into the shared buffer */
+	sd = host_get_shared_structure(&realm, 0U);
+	rl_output = (struct sve_cmd_probe_vl *)sd->realm_cmd_output_buffer;
+
+	INFO("Supported SVE vector length in bits (expected):\n");
+	print_sve_vl_bitmap(vl_bitmap_expected);
+
+	INFO("Supported SVE vector length in bits (probed):\n");
+	print_sve_vl_bitmap(rl_output->vl_bitmap);
+
+	/* The NS-probed and Realm-probed VL sets must be identical */
+	if (vl_bitmap_expected == rl_output->vl_bitmap) {
+		rc = TEST_RESULT_SUCCESS;
+	} else {
+		rc = TEST_RESULT_FAIL;
+	}
+
+rm_realm:
+	if (!host_destroy_realm(&realm)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return rc;
+}
+
+/* Check whether RMM preserves NS ZCR_EL2 register. */
+test_result_t host_sve_realm_check_config_register(void)
+{
+	u_register_t ns_zcr_el2, ns_zcr_el2_cur;
+	u_register_t rmi_feat_reg0;
+	test_result_t rc;
+	uint8_t vq;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+	vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+	rc = host_create_sve_realm_payload(true, vq);
+	if (rc != TEST_RESULT_SUCCESS) {
+		return rc;
+	}
+
+	/*
+	 * Configure TFTF from 0 to SVE_VQ_ARCH_MAX, and in each iteration check
+	 * if NS ZCR_EL2 is same before and after call to run Realm.
+	 */
+	rc = TEST_RESULT_SUCCESS;
+	for (vq = 0U; vq <= SVE_VQ_ARCH_MAX; vq++) {
+		bool realm_rc;
+
+		sve_config_vq(vq);
+		ns_zcr_el2 = read_zcr_el2();
+
+		/* Call Realm to run SVE command */
+		realm_rc = host_enter_realm_execute(&realm, REALM_SVE_RDVL,
+						    RMI_EXIT_HOST_CALL, 0U);
+		if (!realm_rc) {
+			ERROR("Realm command REALM_SVE_RDVL failed\n");
+			rc = TEST_RESULT_FAIL;
+			break;
+		}
+		ns_zcr_el2_cur = read_zcr_el2();
+
+		/* ZCR_EL2 must be unchanged across the Realm round-trip */
+		if (ns_zcr_el2 != ns_zcr_el2_cur) {
+			ERROR("NS ZCR_EL2 expected: 0x%lx, got: 0x%lx\n",
+			      ns_zcr_el2, ns_zcr_el2_cur);
+			rc = TEST_RESULT_FAIL;
+		}
+	}
+
+	if (!host_destroy_realm(&realm)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return rc;
+}
+
+/*
+ * Sends command to Realm to do SVE operations, while NS is also doing SVE
+ * operations.
+ * Returns:
+ *	false - On success
+ *	true - On failure
+ */
+static bool callback_realm_do_sve(void)
+{
+	/* Inverted: host_enter_realm_execute() returns true on success */
+	return !host_enter_realm_execute(&realm, REALM_SVE_OPS,
+					 RMI_EXIT_HOST_CALL, 0U);
+}
+
+/*
+ * Sends command to Realm to fill FPU registers, while NS is doing SVE
+ * operations.
+ * Returns:
+ *	false - On success
+ *	true - On failure
+ */
+static bool callback_realm_do_fpu(void)
+{
+	/* Inverted: host_enter_realm_execute() returns true on success */
+	return !host_enter_realm_execute(&realm, REALM_REQ_FPU_FILL_CMD,
+					 RMI_EXIT_HOST_CALL, 0U);
+}
+
+/*
+ * Run interleaved NS SVE (or Streaming SVE) array operations while
+ * periodically entering the Realm, then verify the NS results were not
+ * corrupted by the world switches.
+ *
+ * realm_sve_en - create the Realm with SVE; Realm runs SVE ops, else FPU ops
+ * realm_sve_vq - SVE vector quanta for the Realm
+ * ns_sve_mode  - NS_NORMAL_SVE or NS_STREAMING_SVE
+ */
+static test_result_t run_sve_vectors_operations(bool realm_sve_en,
+						uint8_t realm_sve_vq,
+						int ns_sve_mode)
+{
+	bool (*realm_callback)(void);
+	test_result_t rc;
+	bool cb_err;
+	unsigned int i;
+	int val;
+
+	rc = host_create_sve_realm_payload(realm_sve_en, realm_sve_vq);
+	if (rc != TEST_RESULT_SUCCESS) {
+		return rc;
+	}
+
+	/* Get a random value to seed the sve_subtract operands */
+	val = rand();
+	for (i = 0U; i < NS_SVE_OP_ARRAYSIZE; i++) {
+		ns_sve_op_1[i] = val - i;
+		ns_sve_op_2[i] = 1;
+	}
+
+	/* Realm exercises SVE regs if SVE-enabled, FPU regs otherwise */
+	if (realm_sve_en) {
+		realm_callback = callback_realm_do_sve;
+	} else {
+		realm_callback = callback_realm_do_fpu;
+	}
+
+	for (i = 0U; i < SVE_TEST_ITERATIONS; i++) {
+		/* Config NS world with random SVE VL or SVE SVL */
+		if (ns_sve_mode == NS_NORMAL_SVE) {
+			sve_config_vq(SVE_GET_RANDOM_VQ);
+		} else {
+			sme_config_svq(SME_GET_RANDOM_SVQ);
+		}
+
+		/* Perform SVE operations with intermittent calls to Realm */
+		cb_err = sve_subtract_arrays_interleaved(ns_sve_op_1,
+							 ns_sve_op_1,
+							 ns_sve_op_2,
+							 NS_SVE_OP_ARRAYSIZE,
+							 realm_callback);
+		if (cb_err) {
+			ERROR("Callback to realm failed\n");
+			rc = TEST_RESULT_FAIL;
+			goto rm_realm;
+		}
+	}
+
+	/*
+	 * Check result of SVE operations: each element was decremented once
+	 * per iteration, so expect (val - i - SVE_TEST_ITERATIONS).
+	 */
+	rc = TEST_RESULT_SUCCESS;
+
+	for (i = 0U; i < NS_SVE_OP_ARRAYSIZE; i++) {
+		if (ns_sve_op_1[i] != (val - i - SVE_TEST_ITERATIONS)) {
+			/*
+			 * Fix: report "Streaming SVE" in streaming mode; the
+			 * ternary previously printed "SVE" on both branches.
+			 */
+			ERROR("%s op failed at idx: %u, expected: 0x%x received:"
+			      " 0x%x\n", (ns_sve_mode == NS_NORMAL_SVE) ?
+			      "SVE" : "Streaming SVE", i,
+			      (val - i - SVE_TEST_ITERATIONS), ns_sve_op_1[i]);
+			rc = TEST_RESULT_FAIL;
+		}
+	}
+
+rm_realm:
+	if (!host_destroy_realm(&realm)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return rc;
+}
+
+/*
+ * Intermittently switch to Realm while NS is doing SVE ops in Normal
+ * SVE mode.
+ *
+ * This testcase runs for SVE only config or SVE + SME config
+ */
+test_result_t host_sve_realm_check_vectors_operations(void)
+{
+	u_register_t rmi_feat_reg0;
+	uint8_t realm_sve_vq;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+	CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+	realm_sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+	/* Run SVE operations in Normal SVE mode */
+	return run_sve_vectors_operations(true, realm_sve_vq, NS_NORMAL_SVE);
+}
+
+/*
+ * Intermittently switch to Realm while NS is doing SVE ops in Streaming
+ * SVE mode.
+ *
+ * This testcase runs for SME only config or SVE + SME config
+ */
+test_result_t host_sve_realm_check_streaming_vectors_operations(void)
+{
+	u_register_t rmi_feat_reg0;
+	test_result_t rc;
+	uint8_t realm_sve_vq;
+	bool realm_sve_en;
+
+	SKIP_TEST_IF_SME_NOT_SUPPORTED();
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	/* Realm gets SVE only if the platform (and RMI) support it */
+	if (is_armv8_2_sve_present()) {
+		CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+		realm_sve_en = true;
+		realm_sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL,
+				       rmi_feat_reg0);
+	} else {
+		/* Fix: use bool/unsigned literals, matching sibling tests */
+		realm_sve_en = false;
+		realm_sve_vq = 0U;
+	}
+
+	/* Enter Streaming SVE mode */
+	sme_smstart(SMSTART_SM);
+
+	/* Run SVE operations in Streaming SVE mode */
+	rc = run_sve_vectors_operations(realm_sve_en, realm_sve_vq,
+					NS_STREAMING_SVE);
+
+	/* Exit Streaming SVE mode */
+	sme_smstop(SMSTOP_SM);
+
+	return rc;
+}
+
+/*
+ * Check if RMM leaks Realm SVE registers.
+ * This test is skipped if the supported max VQ is 128 bits, as we won't be able
+ * to run NS and Realm context with lower and higher VQ respectively.
+ * This test does the below steps:
+ *
+ * 1. Set NS SVE VQ to max and write known pattern
+ * 2. NS programs ZCR_EL2 with VQ as 0 (128 bits).
+ * 3. Create Realm with max VQ (higher than NS SVE VQ).
+ * 4. Call Realm to fill in Z registers
+ * 5. Once Realm returns, NS sets ZCR_EL2 with max VQ and reads the Z registers
+ * 6. The upper bits of Z registers must be either 0 or the old values filled by
+ *    NS world at step 1.
+ */
+test_result_t host_sve_realm_check_vectors_leaked(void)
+{
+	u_register_t rmi_feat_reg0;
+	test_result_t rc;
+	uint64_t bitmap;
+	bool realm_rc;
+	uint8_t sve_vq;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+	sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+	/* Skip test if the supported max VQ is 128 bits */
+	if (sve_vq == SVE_VQ_ARCH_MIN) {
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* 1. Set NS SVE VQ to max and write known pattern */
+	sve_config_vq(sve_vq);
+	(void)memset((void *)&ns_sve_z_regs_write, 0xAA,
+		     SVE_VQ_TO_BYTES(sve_vq) * SVE_NUM_VECTORS);
+	sve_z_regs_write(&ns_sve_z_regs_write);
+
+	/* 2. NS programs ZCR_EL2 with VQ as 0 */
+	sve_config_vq(SVE_VQ_ARCH_MIN);
+
+	/* 3. Create Realm with max VQ (higher than NS SVE VQ) */
+	rc = host_create_sve_realm_payload(true, sve_vq);
+	if (rc != TEST_RESULT_SUCCESS) {
+		return rc;
+	}
+
+	/* 4. Call Realm to fill in Z registers */
+	realm_rc = host_enter_realm_execute(&realm, REALM_SVE_FILL_REGS,
+					    RMI_EXIT_HOST_CALL, 0U);
+	if (!realm_rc) {
+		rc = TEST_RESULT_FAIL;
+		goto rm_realm;
+	}
+
+	/* 5. NS sets ZCR_EL2 with max VQ and reads the Z registers */
+	sve_config_vq(sve_vq);
+	sve_z_regs_read(&ns_sve_z_regs_read);
+
+	/*
+	 * 6. The upper bits in Z vectors (sve_vq - SVE_VQ_ARCH_MIN) must
+	 * be either 0 or the old values filled by NS world.
+	 * TODO: check if upper bits are zero
+	 */
+	bitmap = sve_z_regs_compare(&ns_sve_z_regs_write, &ns_sve_z_regs_read);
+	if (bitmap != 0UL) {
+		ERROR("SVE Z regs compare failed (bitmap: 0x%016llx)\n",
+		      bitmap);
+		rc = TEST_RESULT_FAIL;
+	} else {
+		rc = TEST_RESULT_SUCCESS;
+	}
+
+rm_realm:
+	if (!host_destroy_realm(&realm)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return rc;
+}
+
+/*
+ * Create a non SVE Realm and try to access SVE, the Realm must receive
+ * undefined abort.
+ */
+test_result_t host_non_sve_realm_check_undef_abort(void)
+{
+	test_result_t rc;
+	bool realm_rc;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+	SKIP_TEST_IF_SVE_NOT_SUPPORTED();
+
+	/* Realm created with SVE disabled */
+	rc = host_create_sve_realm_payload(false, 0);
+	if (rc != TEST_RESULT_SUCCESS) {
+		return rc;
+	}
+
+	/* Realm command reports success only if the undef abort was taken */
+	realm_rc = host_enter_realm_execute(&realm, REALM_SVE_UNDEF_ABORT,
+					    RMI_EXIT_HOST_CALL, 0U);
+	if (!realm_rc) {
+		ERROR("Realm didn't receive undefined abort\n");
+		rc = TEST_RESULT_FAIL;
+	} else {
+		rc = TEST_RESULT_SUCCESS;
+	}
+
+	if (!host_destroy_realm(&realm)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return rc;
+}
+
+/* Generate random values and write them to the SVE Z, P and FFR registers */
+static void ns_sve_write_rand(void)
+{
+	/*
+	 * In Streaming SVE mode without FEAT_SME_FA64, the FFR register is
+	 * not accessible, so it must be skipped in that configuration.
+	 */
+	const bool skip_ffr = is_feat_sme_supported() && sme_smstat_sm() &&
+			      !sme_feat_fa64_enabled();
+
+	sve_z_regs_write_rand(&ns_sve_z_regs_write);
+	sve_p_regs_write_rand(&ns_sve_p_regs_write);
+	if (!skip_ffr) {
+		sve_ffr_regs_write_rand(&ns_sve_ffr_regs_write);
+	}
+}
+
+/* Read SVE Z, P and FFR registers and compare it with the last written values */
+static test_result_t ns_sve_read_and_compare(void)
+{
+	test_result_t rc = TEST_RESULT_SUCCESS;
+	uint64_t bitmap;
+	bool has_ffr = true;
+
+	/* FFR is inaccessible in Streaming SVE mode without FEAT_SME_FA64 */
+	if (is_feat_sme_supported() && sme_smstat_sm() &&
+	    !sme_feat_fa64_enabled()) {
+		has_ffr = false;
+	}
+
+	/* Clear old state */
+	memset((void *)&ns_sve_z_regs_read, 0, sizeof(ns_sve_z_regs_read));
+	memset((void *)&ns_sve_p_regs_read, 0, sizeof(ns_sve_p_regs_read));
+	memset((void *)&ns_sve_ffr_regs_read, 0, sizeof(ns_sve_ffr_regs_read));
+
+	/* Read Z, P, FFR registers to compare it with the last written values */
+	sve_z_regs_read(&ns_sve_z_regs_read);
+	sve_p_regs_read(&ns_sve_p_regs_read);
+	if (has_ffr) {
+		sve_ffr_regs_read(&ns_sve_ffr_regs_read);
+	}
+
+	/* Non-zero compare bitmaps identify which registers mismatched */
+	bitmap = sve_z_regs_compare(&ns_sve_z_regs_write, &ns_sve_z_regs_read);
+	if (bitmap != 0UL) {
+		ERROR("SVE Z regs compare failed (bitmap: 0x%016llx)\n",
+		      bitmap);
+		rc = TEST_RESULT_FAIL;
+	}
+
+	bitmap = sve_p_regs_compare(&ns_sve_p_regs_write, &ns_sve_p_regs_read);
+	if (bitmap != 0UL) {
+		ERROR("SVE P regs compare failed (bitmap: 0x%016llx)\n",
+		      bitmap);
+		rc = TEST_RESULT_FAIL;
+	}
+
+	if (has_ffr) {
+		bitmap = sve_ffr_regs_compare(&ns_sve_ffr_regs_write,
+					      &ns_sve_ffr_regs_read);
+		if (bitmap != 0) {
+			ERROR("SVE FFR regs compare failed "
+			      "(bitmap: 0x%016llx)\n", bitmap);
+			rc = TEST_RESULT_FAIL;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Generate random values and write it to Streaming SVE Z, P and FFR registers.
+ */
+static void ns_sme_write_rand(void)
+{
+	/*
+	 * TODO: more SME specific registers like ZA, ZT0 can be included later.
+	 */
+
+	/* Fill SVE registers in normal or streaming SVE mode */
+	ns_sve_write_rand();
+}
+
+/*
+ * Read streaming SVE Z, P and FFR registers and compare it with the last
+ * written values
+ */
+static test_result_t ns_sme_read_and_compare(void)
+{
+	/*
+	 * TODO: more SME specific registers like ZA, ZT0 can be included later.
+	 */
+
+	/* Compares SVE registers in normal or streaming SVE mode */
+	return ns_sve_read_and_compare();
+}
+
+/* Map a simd_test_t value to a printable name */
+static char *simd_type_to_str(simd_test_t type)
+{
+	switch (type) {
+	case TEST_FPU:
+		return "FPU";
+	case TEST_SVE:
+		return "SVE";
+	case TEST_SME:
+		return "SME";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+/*
+ * Log the current NS SIMD configuration (SMCR/ZCR/SVE hint as applicable)
+ * for the given test type. 'cmd' selects the label: true for the
+ * "write rand" phase, false for the "read and compare" phase.
+ */
+static void ns_simd_print_cmd_config(bool cmd, simd_test_t type)
+{
+	char __unused *tstr = simd_type_to_str(type);
+	char __unused *cstr = cmd ? "write rand" : "read and compare";
+
+	if (type == TEST_SME) {
+		if (sme_smstat_sm()) {
+			INFO("TFTF: NS [%s] %s. Config: smcr: 0x%llx, SM: on\n",
+			     tstr, cstr, (uint64_t)read_smcr_el2());
+		} else {
+			INFO("TFTF: NS [%s] %s. Config: smcr: 0x%llx, "
+			     "zcr: 0x%llx sve_hint: %d SM: off\n", tstr, cstr,
+			     (uint64_t)read_smcr_el2(),
+			     (uint64_t)sve_read_zcr_elx(),
+			     tftf_smc_get_sve_hint());
+		}
+	} else if (type == TEST_SVE) {
+		INFO("TFTF: NS [%s] %s. Config: zcr: 0x%llx, sve_hint: %d\n",
+		     tstr, cstr, (uint64_t)sve_read_zcr_elx(),
+		     tftf_smc_get_sve_hint());
+	} else {
+		INFO("TFTF: NS [%s] %s\n", tstr, cstr);
+	}
+}
+
+/*
+ * Randomly select TEST_SME or TEST_FPU. For TEST_SME, randomly select below
+ * configurations:
+ * - enable/disable streaming mode
+ *   For streaming mode:
+ *   - enable or disable FA64
+ *   - select random streaming vector length
+ *   For normal SVE mode:
+ *   - select random normal SVE vector length
+ */
+static simd_test_t ns_sme_select_random_config(void)
+{
+	simd_test_t type;
+	static unsigned int counter;
+
+	/* Use a static counter to mostly select TEST_SME case. */
+	if ((counter % 8U) != 0) {
+		/* Use counter to toggle between Streaming mode on or off */
+		if (is_armv8_2_sve_present() && ((counter % 2U) != 0)) {
+			/* Normal SVE mode with a random VL and hint bit */
+			sme_smstop(SMSTOP_SM);
+			sve_config_vq(SVE_GET_RANDOM_VQ);
+
+			if ((counter % 3U) != 0) {
+				tftf_smc_set_sve_hint(true);
+			} else {
+				tftf_smc_set_sve_hint(false);
+			}
+		} else {
+			/* Streaming SVE mode with a random SVL and FA64 */
+			sme_smstart(SMSTART_SM);
+			sme_config_svq(SME_GET_RANDOM_SVQ);
+
+			if ((counter % 3U) != 0) {
+				sme_enable_fa64();
+			} else {
+				sme_disable_fa64();
+			}
+		}
+		type = TEST_SME;
+	} else {
+		type = TEST_FPU;
+	}
+	counter++;
+
+	return type;
+}
+
+/*
+ * Randomly select TEST_SVE or TEST_FPU. For TEST_SVE, configure zcr_el2 with
+ * random vector length and randomly enable or disable SMC SVE hint bit.
+ */
+static simd_test_t ns_sve_select_random_config(void)
+{
+	simd_test_t type;
+	static unsigned int counter;
+
+	/* Use a static counter to mostly select TEST_SVE case. */
+	if ((counter % 4U) != 0) {
+		sve_config_vq(SVE_GET_RANDOM_VQ);
+
+		/* Toggle the SMC SVE hint bit between iterations */
+		if ((counter % 2U) != 0) {
+			tftf_smc_set_sve_hint(true);
+		} else {
+			tftf_smc_set_sve_hint(false);
+		}
+
+		type = TEST_SVE;
+	} else {
+		type = TEST_FPU;
+	}
+	counter++;
+
+	return type;
+}
+
+/*
+ * Configure NS world SIMD. Randomly choose to test SVE or FPU registers if
+ * system supports SVE.
+ *
+ * Returns either TEST_FPU or TEST_SVE or TEST_SME
+ */
+static simd_test_t ns_simd_select_random_config(void)
+{
+	simd_test_t type;
+
+	/* cleanup old config for SME */
+	if (is_feat_sme_supported()) {
+		sme_smstop(SMSTOP_SM);
+		sme_enable_fa64();
+	}
+
+	/* Cleanup old config for SVE */
+	if (is_armv8_2_sve_present()) {
+		tftf_smc_set_sve_hint(false);
+	}
+
+	/* Pick a selector based on which extensions are available */
+	if (is_armv8_2_sve_present() && is_feat_sme_supported()) {
+		if (rand() % 2) {
+			type = ns_sme_select_random_config();
+		} else {
+			type = ns_sve_select_random_config();
+		}
+	} else if (is_feat_sme_supported()) {
+		type = ns_sme_select_random_config();
+	} else if (is_armv8_2_sve_present()) {
+		type = ns_sve_select_random_config();
+	} else {
+		type = TEST_FPU;
+	}
+
+	return type;
+}
+
+/* Select random NS SIMD config and write random values to its registers */
+static simd_test_t ns_simd_write_rand(void)
+{
+	simd_test_t type;
+
+	type = ns_simd_select_random_config();
+
+	ns_simd_print_cmd_config(true, type);
+
+	/* Fill the register set matching the selected test type */
+	if (type == TEST_SME) {
+		ns_sme_write_rand();
+	} else if (type == TEST_SVE) {
+		ns_sve_write_rand();
+	} else {
+		fpu_q_regs_write_rand(ns_fpu_q_regs_write);
+	}
+
+	/* fpcr, fpsr common to all configs */
+	fpu_cs_regs_write_rand(&ns_fpu_cs_regs_write);
+
+	return type;
+}
+
+/* Read and compare the NS SIMD registers with the last written values */
+static test_result_t ns_simd_read_and_compare(simd_test_t type)
+{
+	test_result_t rc = TEST_RESULT_SUCCESS;
+
+	ns_simd_print_cmd_config(false, type);
+
+	/* Compare the register set matching the selected test type */
+	if (type == TEST_SME) {
+		rc = ns_sme_read_and_compare();
+	} else if (type == TEST_SVE) {
+		rc = ns_sve_read_and_compare();
+	} else {
+		fpu_q_regs_read(ns_fpu_q_regs_read);
+		if (fpu_q_regs_compare(ns_fpu_q_regs_write,
+				       ns_fpu_q_regs_read)) {
+			ERROR("FPU Q registers compare failed\n");
+			rc = TEST_RESULT_FAIL;
+		}
+	}
+
+	/* fpcr, fpsr common to all configs */
+	fpu_cs_regs_read(&ns_fpu_cs_regs_read);
+	if (fpu_cs_regs_compare(&ns_fpu_cs_regs_write, &ns_fpu_cs_regs_read)) {
+		ERROR("FPCR/FPSR registers compare failed\n");
+		rc = TEST_RESULT_FAIL;
+	}
+
+	return rc;
+}
+
+/* Select random Realm SIMD config and write random values to its registers */
+static simd_test_t rl_simd_write_rand(bool rl_sve_en)
+{
+	enum realm_cmd rl_fill_cmd;
+	simd_test_t type;
+	bool __unused rc;
+
+	/* Select random commands to test. SVE or FPU registers in Realm */
+	if (rl_sve_en && (rand() % 2)) {
+		type = TEST_SVE;
+	} else {
+		type = TEST_FPU;
+	}
+
+	INFO("TFTF: RL [%s] write random\n", simd_type_to_str(type));
+	if (type == TEST_SVE) {
+		rl_fill_cmd = REALM_SVE_FILL_REGS;
+	} else {
+		rl_fill_cmd = REALM_REQ_FPU_FILL_CMD;
+	}
+
+	/*
+	 * NOTE(review): assert() compiles out under NDEBUG, so a failed Realm
+	 * entry would be silently ignored in release builds — confirm this is
+	 * intended for this helper.
+	 */
+	rc = host_enter_realm_execute(&realm, rl_fill_cmd, RMI_EXIT_HOST_CALL, 0U);
+	assert(rc);
+
+	return type;
+}
+
+/* Read and compare the Realm SIMD registers with the last written values */
+static bool rl_simd_read_and_compare(simd_test_t type)
+{
+	enum realm_cmd rl_cmp_cmd;
+
+	INFO("TFTF: RL [%s] read and compare\n", simd_type_to_str(type));
+	if (type == TEST_SVE) {
+		rl_cmp_cmd = REALM_SVE_CMP_REGS;
+	} else {
+		rl_cmp_cmd = REALM_REQ_FPU_CMP_CMD;
+	}
+
+	/* true means the Realm-side compare passed */
+	return host_enter_realm_execute(&realm, rl_cmp_cmd, RMI_EXIT_HOST_CALL,
+					0U);
+}
+
+/*
+ * This test case verifies whether various SIMD related registers like Q[0-31],
+ * FPCR, FPSR, Z[0-31], P[0-15], FFR are preserved by RMM during world switch
+ * between NS world and Realm world.
+ *
+ * Randomly verify FPU registers or SVE registers if the system supports SVE.
+ * Within SVE, randomly configure SVE vector length.
+ *
+ * This testcase runs on below configs:
+ * - SVE only
+ * - SME only
+ * - with SVE and SME
+ * - without SVE and SME
+ */
+test_result_t host_and_realm_check_simd(void)
+{
+	u_register_t rmi_feat_reg0;
+	test_result_t rc;
+	uint8_t sve_vq;
+	bool sve_en;
+	security_state_t sec_state;
+	simd_test_t ns_simd_type, rl_simd_type;
+	unsigned int test_iterations;
+	unsigned int num_simd_types;
+	unsigned int num_simd_configs;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	if (host_rmi_features(0UL, &rmi_feat_reg0) != REALM_SUCCESS) {
+		ERROR("Failed to get RMI feat_reg0\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	sve_en = rmi_feat_reg0 & RMI_FEATURE_REGISTER_0_SVE_EN;
+	sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+	/* Create Realm with SVE enabled if RMI features supports it */
+	INFO("TFTF: create realm sve_en/sve_vq: %d/%d\n", sve_en, sve_vq);
+	rc = host_create_sve_realm_payload(sve_en, sve_vq);
+	if (rc != TEST_RESULT_SUCCESS) {
+		return rc;
+	}
+
+	/*
+	 * Randomly select and configure NS simd context to test. And fill it
+	 * with random values.
+	 */
+	ns_simd_type = ns_simd_write_rand();
+
+	/*
+	 * Randomly select and configure Realm simd context to test. Enter realm
+	 * and fill simd context with random values.
+	 */
+	rl_simd_type = rl_simd_write_rand(sve_en);
+	sec_state = REALM_WORLD;
+
+	/*
+	 * Find out test iterations based on if SVE is enabled and the number of
+	 * configurations available in the SVE.
+	 */
+
+	/* FPU is always available */
+	num_simd_types = 1U;
+	num_simd_configs = NUM_FPU_CONFIGS;
+
+	if (is_armv8_2_sve_present()) {
+		num_simd_types += 1;
+		num_simd_configs += NUM_SVE_CONFIGS;
+	}
+
+	if (is_feat_sme_supported()) {
+		num_simd_types += 1;
+		num_simd_configs += NUM_SME_CONFIGS;
+	}
+
+	/* Scale iteration count with the number of types and configs */
+	if (num_simd_configs) {
+		test_iterations = TEST_ITERATIONS_MIN * num_simd_types *
+			num_simd_configs;
+	} else {
+		test_iterations = TEST_ITERATIONS_MIN * num_simd_types;
+	}
+
+	/* Alternate NS <-> Realm each iteration; verify, then refill */
+	for (uint32_t i = 0U; i < test_iterations; i++) {
+		if (sec_state == NONSECURE_WORLD) {
+			sec_state = REALM_WORLD;
+		} else {
+			sec_state = NONSECURE_WORLD;
+		}
+
+		switch (sec_state) {
+		case NONSECURE_WORLD:
+			/*
+			 * Read NS simd context and compare it with last written
+			 * context.
+			 */
+			rc = ns_simd_read_and_compare(ns_simd_type);
+			if (rc != TEST_RESULT_SUCCESS) {
+				goto rm_realm;
+			}
+
+			/*
+			 * Randomly select and configure NS simd context. And
+			 * fill it with random values for the next compare.
+			 */
+			ns_simd_type = ns_simd_write_rand();
+			break;
+		case REALM_WORLD:
+			/*
+			 * Enter Realm and read the simd context and compare it
+			 * with last written context.
+			 */
+			if (!rl_simd_read_and_compare(rl_simd_type)) {
+				ERROR("%s failed %d\n", __func__, __LINE__);
+				rc = TEST_RESULT_FAIL;
+				goto rm_realm;
+			}
+
+			/*
+			 * Randomly select and configure Realm simd context to
+			 * test. Enter realm and fill simd context with random
+			 * values for the next compare.
+			 */
+			rl_simd_type = rl_simd_write_rand(sve_en);
+			break;
+		default:
+			break;
+		}
+	}
+
+	rc = TEST_RESULT_SUCCESS;
+rm_realm:
+	/* Cleanup old config */
+	if (is_feat_sme_supported()) {
+		sme_smstop(SMSTOP_SM);
+		sme_enable_fa64();
+	}
+
+	/* Cleanup old config */
+	if (is_armv8_2_sve_present()) {
+		tftf_smc_set_sve_hint(false);
+	}
+
+	if (!host_destroy_realm(&realm)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return rc;
+}
+
+/*
+ * Create a Realm and check SME specific ID registers. Realm must report SME
+ * not present in ID_AA64PFR1_EL1 and no SME features present in
+ * ID_AA64SMFR0_EL1
+ */
+test_result_t host_realm_check_sme_id_registers(void)
+{
+	host_shared_data_t *sd;
+	struct sme_cmd_id_regs *r_regs;
+	test_result_t rc;
+	bool realm_rc;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	rc = host_create_sve_realm_payload(false, 0);
+	if (rc != TEST_RESULT_SUCCESS) {
+		return rc;
+	}
+
+	realm_rc = host_enter_realm_execute(&realm, REALM_SME_ID_REGISTERS,
+					    RMI_EXIT_HOST_CALL, 0U);
+	if (!realm_rc) {
+		rc = TEST_RESULT_FAIL;
+		goto rm_realm;
+	}
+
+	/* Realm wrote the ID register values into the shared buffer */
+	sd = host_get_shared_structure(&realm, 0U);
+	r_regs = (struct sme_cmd_id_regs *)sd->realm_cmd_output_buffer;
+
+	/* Check ID register SME flags */
+	rc = TEST_RESULT_SUCCESS;
+	if (EXTRACT(ID_AA64PFR1_EL1_SME, r_regs->id_aa64pfr1_el1) >=
+	    ID_AA64PFR1_EL1_SME_SUPPORTED) {
+		ERROR("ID_AA64PFR1_EL1: SME enabled\n");
+		rc = TEST_RESULT_FAIL;
+	}
+	if (r_regs->id_aa64smfr0_el1 != 0UL) {
+		ERROR("ID_AA64SMFR0_EL1: Realm reported non-zero value\n");
+		rc = TEST_RESULT_FAIL;
+	}
+
+rm_realm:
+	host_destroy_realm(&realm);
+	return rc;
+}
+
+/*
+ * Create a Realm and try to access SME, the Realm must receive undefined abort.
+ */
+test_result_t host_realm_check_sme_undef_abort(void)
+{
+	test_result_t rc;
+	bool realm_rc;
+
+	SKIP_TEST_IF_SME_NOT_SUPPORTED();
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	rc = host_create_sve_realm_payload(false, 0);
+	if (rc != TEST_RESULT_SUCCESS) {
+		return rc;
+	}
+
+	/* Realm command reports success only if the undef abort was taken */
+	realm_rc = host_enter_realm_execute(&realm, REALM_SME_UNDEF_ABORT,
+					    RMI_EXIT_HOST_CALL, 0U);
+	if (!realm_rc) {
+		ERROR("Realm didn't receive undefined abort\n");
+		rc = TEST_RESULT_FAIL;
+	} else {
+		rc = TEST_RESULT_SUCCESS;
+	}
+
+	host_destroy_realm(&realm);
+	return rc;
+}
+
+/*
+ * Check whether RMM preserves NS SME config values and flags
+ * 1. SMCR_EL2.LEN field
+ * 2. SMCR_EL2.FA64 flag
+ * 3. Streaming SVE mode status
+ *
+ * This test case runs for SVE + SME config and SME only config and skipped for
+ * non SME config.
+ */
+test_result_t host_realm_check_sme_configs(void)
+{
+	u_register_t ns_smcr_el2, ns_smcr_el2_cur;
+	u_register_t rmi_feat_reg0;
+	bool ssve_mode;
+	test_result_t rc;
+	uint8_t sve_vq;
+	uint8_t sme_svq;
+	bool sve_en;
+
+	SKIP_TEST_IF_SME_NOT_SUPPORTED();
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+	if (is_armv8_2_sve_present()) {
+		CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+		sve_en = true;
+		sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+	} else {
+		sve_en = false;
+		sve_vq = 0;
+	}
+
+	rc = host_create_sve_realm_payload(sve_en, sve_vq);
+	if (rc != TEST_RESULT_SUCCESS) {
+		return rc;
+	}
+
+	/*
+	 * Configure TFTF from 0 to SME_SVQ_ARCH_MAX, and in each iteration
+	 * randomly enable or disable FA64 and Streaming SVE mode. After calling
+	 * Realm, check the NS SME configuration status.
+	 */
+	rc = TEST_RESULT_SUCCESS;
+	for (sme_svq = 0U; sme_svq <= SME_SVQ_ARCH_MAX; sme_svq++) {
+		bool realm_rc;
+
+		sme_config_svq(sme_svq);
+
+		/* randomly enable or disable FEAT_SME_FA64 */
+		if (sme_svq % 2) {
+			sme_enable_fa64();
+			sme_smstart(SMSTART_SM);
+			ssve_mode = true;
+		} else {
+			sme_disable_fa64();
+			sme_smstop(SMSTOP_SM);
+			ssve_mode = false;
+		}
+
+		ns_smcr_el2 = read_smcr_el2();
+
+		/*
+		 * If SVE is supported then we would have created a Realm with
+		 * SVE support, so run SVE command else run FPU command
+		 */
+		if (sve_en) {
+			realm_rc = host_enter_realm_execute(&realm, REALM_SVE_RDVL,
+							    RMI_EXIT_HOST_CALL,
+							    0U);
+		} else {
+			realm_rc = host_enter_realm_execute(&realm,
+							    REALM_REQ_FPU_FILL_CMD,
+							    RMI_EXIT_HOST_CALL, 0U);
+		}
+
+		if (!realm_rc) {
+			/*
+			 * Fix: report the command that actually ran; this
+			 * previously always claimed REALM_SVE_RDVL failed.
+			 */
+			ERROR("Realm command %s failed\n",
+			      sve_en ? "REALM_SVE_RDVL" :
+			      "REALM_REQ_FPU_FILL_CMD");
+			rc = TEST_RESULT_FAIL;
+			break;
+		}
+		ns_smcr_el2_cur = read_smcr_el2();
+
+		/* SMCR_EL2 must be unchanged across the Realm round-trip */
+		if (ns_smcr_el2 != ns_smcr_el2_cur) {
+			ERROR("NS SMCR_EL2 expected: 0x%lx, got: 0x%lx\n",
+			      ns_smcr_el2, ns_smcr_el2_cur);
+			rc = TEST_RESULT_FAIL;
+		}
+
+		/* Streaming mode status must also be preserved */
+		if (sme_smstat_sm() != ssve_mode) {
+			if (ssve_mode) {
+				ERROR("NS Streaming SVE mode is disabled\n");
+			} else {
+				ERROR("NS Streaming SVE mode is enabled\n");
+			}
+
+			rc = TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Exit Streaming SVE mode if test case enabled it */
+	if (ssve_mode) {
+		sme_smstop(SMSTOP_SM);
+	}
+
+	if (!host_destroy_realm(&realm)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return rc;
+}
diff --git a/tftf/tests/runtime_services/realm_payload/host_realm_payload_tests.c b/tftf/tests/runtime_services/realm_payload/host_realm_payload_tests.c
new file mode 100644
index 000000000..ff698692d
--- /dev/null
+++ b/tftf/tests/runtime_services/realm_payload/host_realm_payload_tests.c
@@ -0,0 +1,1275 @@
+/*
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <assert.h>
+#include <arch_features.h>
+#include <debug.h>
+#include <irq.h>
+#include <drivers/arm/arm_gic.h>
+#include <drivers/arm/gic_v3.h>
+#include <heap/page_alloc.h>
+#include <pauth.h>
+#include <test_helpers.h>
+
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_pmu.h>
+#include <host_shared_data.h>
+
+#define SLEEP_TIME_MS 20U
+
+extern const char *rmi_exit[];
+
+#if ENABLE_PAUTH
+static uint128_t pauth_keys_before[NUM_KEYS];
+static uint128_t pauth_keys_after[NUM_KEYS];
+#endif
+
+/*
+ * @Test_Aim@ Test realm payload creation, execution and destruction iteratively
+ *
+ * Runs five full lifecycle iterations: create and activate a realm with a
+ * single runnable REC, set up the NS<->Realm shared buffer, enter the realm
+ * with REALM_SLEEP_CMD (sleep duration passed via HOST_ARG1_INDEX) and then
+ * destroy the realm. Verifies realm resources can be torn down and re-created
+ * repeatedly. Skipped when RME is unsupported or the RMM is the Test Realm
+ * Payload (TRP).
+ */
+test_result_t host_test_realm_create_enter(void)
+{
+ bool ret1, ret2;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+ struct realm realm;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ for (unsigned int i = 0U; i < 5U; i++) {
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Ask the realm to sleep for SLEEP_TIME_MS before issuing the host call */
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, SLEEP_TIME_MS);
+ ret1 = host_enter_realm_execute(&realm, REALM_SLEEP_CMD, RMI_EXIT_HOST_CALL, 0U);
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * @Test_Aim@ Test realm payload creation and execution
+ *
+ * Creates and activates a single-REC realm, then enters it with
+ * REALM_GET_RSI_VERSION so the realm payload can query the RSI ABI version.
+ * The realm-side result is checked via host_cmp_result().
+ */
+test_result_t host_test_realm_rsi_version(void)
+{
+ bool ret1, ret2;
+ u_register_t rec_flag[] = {RMI_RUNNABLE};
+ struct realm realm;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret1 = host_enter_realm_execute(&realm, REALM_GET_RSI_VERSION, RMI_EXIT_HOST_CALL, 0U);
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+}
+
<doc_update>
+/*
+ * @Test_Aim@ Create realm with multiple rec
+ * Test PAuth registers are preserved for each rec
+ *
+ * Records the NS-world PAuth key registers, creates a realm with
+ * MAX_REC_COUNT runnable RECs, and for each REC enters twice: once to set
+ * PAuth registers inside the Realm (REALM_PAUTH_SET_CMD) and once to verify
+ * they were preserved across the exit/re-entry (REALM_PAUTH_CHECK_CMD).
+ * Finally verifies that the NS PAuth keys were not corrupted by Realm
+ * execution. Compiled out to SKIPPED when ENABLE_PAUTH == 0.
+ */
+test_result_t host_realm_enable_pauth(void)
+{
+#if ENABLE_PAUTH == 0
+ return TEST_RESULT_SKIPPED;
+#else
+ bool ret1, ret2;
+ u_register_t rec_flag[MAX_REC_COUNT] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+ RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,};
+ struct realm realm;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ /* Snapshot current NS PAuth key registers into pauth_keys_before */
+ pauth_test_lib_fill_regs_and_template(pauth_keys_before);
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, MAX_REC_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ for (unsigned int i = 0U; i < MAX_REC_COUNT; i++) {
+ ret1 = host_enter_realm_execute(&realm, REALM_PAUTH_SET_CMD,
+ RMI_EXIT_HOST_CALL, i);
+
+ if (!ret1) {
+ ERROR("Pauth set cmd failed\n");
+ break;
+ }
+ /* Re-enter Realm to compare PAuth registers. */
+ ret1 = host_enter_realm_execute(&realm, REALM_PAUTH_CHECK_CMD,
+ RMI_EXIT_HOST_CALL, i);
+ if (!ret1) {
+ ERROR("Pauth check cmd failed\n");
+ break;
+ }
+ }
+
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check if PAuth keys are preserved. */
+ /* NOTE(review): pauth_keys_after is presumably populated from the live
+ * registers inside pauth_test_lib_compare_template — confirm against
+ * the pauth test library. */
+ if (!pauth_test_lib_compare_template(pauth_keys_before, pauth_keys_after)) {
+ ERROR("%s(): NS PAuth keys not preserved\n",
+ __func__);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+#endif
+}
+
+/*
+ * @Test_Aim@ Test PAuth fault in Realm
+ *
+ * Enters a single-REC realm with REALM_PAUTH_FAULT, which makes the realm
+ * payload trigger a PAuth authentication failure; only the enter result is
+ * checked here (the destroy result is logged but does not gate the verdict,
+ * since the realm may terminate abnormally). Compiled out to SKIPPED when
+ * ENABLE_PAUTH == 0.
+ */
+test_result_t host_realm_pauth_fault(void)
+{
+#if ENABLE_PAUTH == 0
+ return TEST_RESULT_SKIPPED;
+#else
+ bool ret1, ret2;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+ struct realm realm;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret1 = host_enter_realm_execute(&realm, REALM_PAUTH_FAULT, RMI_EXIT_HOST_CALL, 0U);
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret1) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+#endif
+}
+
+/*
+ * This function is called on REC exit due to IRQ.
+ * By checking Realm PMU state in RecExit object this function
+ * detects if the exit was caused by PMU interrupt. In that
+ * case it disables physical PMU interrupt and sets virtual
+ * PMU interrupt pending by writing to gicv3_lrs attribute
+ * of RecEntry object and re-enters the Realm.
+ *
+ * @realm_ptr: realm whose REC exited
+ * @rec_num:   index of the exited REC
+ * @return true in case of PMU interrupt, false otherwise.
+ */
+static bool host_realm_handle_irq_exit(struct realm *realm_ptr,
+ unsigned int rec_num)
+{
+ struct rmi_rec_run *run = (struct rmi_rec_run *)realm_ptr->run[rec_num];
+
+ /* Check PMU overflow status */
+ if (run->exit.pmu_ovf_status == RMI_PMU_OVERFLOW_ACTIVE) {
+ unsigned int host_call_result;
+ u_register_t exit_reason, retrmm;
+ int ret;
+
+ /* NS side is done with the physical PPI: disable and release it */
+ tftf_irq_disable(PMU_PPI);
+ ret = tftf_irq_unregister_handler(PMU_PPI);
+ if (ret != 0) {
+ ERROR("Failed to %sregister IRQ handler\n", "un");
+ return false;
+ }
+
+ /* Inject PMU virtual interrupt */
+ run->entry.gicv3_lrs[0] =
+ ICH_LRn_EL2_STATE_Pending | ICH_LRn_EL2_Group_1 |
+ (PMU_VIRQ << ICH_LRn_EL2_vINTID_SHIFT);
+
+ /* Re-enter Realm */
+ INFO("Re-entering Realm with vIRQ %lu pending\n", PMU_VIRQ);
+
+ retrmm = host_realm_rec_enter(realm_ptr, &exit_reason,
+ &host_call_result, rec_num);
+ /* Success means the Realm observed the vIRQ and completed via host call */
+ if ((retrmm == REALM_SUCCESS) &&
+ (exit_reason == RMI_EXIT_HOST_CALL) &&
+ (host_call_result == TEST_RESULT_SUCCESS)) {
+ return true;
+ }
+
+ ERROR("%s() failed, ret=%lx host_call_result %u\n",
+ "host_realm_rec_enter", retrmm, host_call_result);
+ }
+ return false;
+}
+
+/*
+ * @Test_Aim@ Test realm PMU
+ *
+ * This function tests PMU functionality in Realm
+ *
+ * Saves the NS PMU state, creates a realm with PMU enabled (requesting the
+ * maximum number of counters via the feature register), runs the given PMU
+ * scenario inside the realm, and finally checks that the NS PMU state was
+ * preserved. For REALM_PMU_INTERRUPT the first exit is expected to be an IRQ
+ * exit, which is then completed by host_realm_handle_irq_exit().
+ *
+ * @cmd: PMU test number
+ * @return test result
+ */
+static test_result_t host_test_realm_pmuv3(uint8_t cmd)
+{
+ struct realm realm;
+ u_register_t feature_flag;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+ bool ret1, ret2;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ /* Snapshot NS PMU state; compared again after the realm is destroyed */
+ host_set_pmu_state();
+
+ /* All-ones counter field requests the maximum implemented PMU counters */
+ feature_flag = RMI_FEATURE_REGISTER_0_PMU_EN |
+ INPLACE(FEATURE_PMU_NUM_CTRS, (unsigned long long)(-1));
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ feature_flag, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* The interrupt scenario exits on IRQ; all others end with a host call */
+ ret1 = host_enter_realm_execute(&realm, cmd,
+ (cmd == REALM_PMU_INTERRUPT) ?
+ RMI_EXIT_IRQ : RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1 || (cmd != REALM_PMU_INTERRUPT)) {
+ goto test_exit;
+ }
+
+ /* Complete the interrupt scenario: inject vIRQ and re-enter */
+ ret1 = host_realm_handle_irq_exit(&realm, 0U);
+
+test_exit:
+ ret2 = host_destroy_realm(&realm);
+ if (!ret1 || !ret2) {
+ ERROR("%s() enter=%u destroy=%u\n", __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_check_pmu_state()) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * Test if the cycle counter works in Realm with NOPs execution
+ * (thin wrapper running the REALM_PMU_CYCLE scenario).
+ */
+test_result_t host_realm_pmuv3_cycle_works(void)
+{
+ return host_test_realm_pmuv3(REALM_PMU_CYCLE);
+}
+
+/*
+ * Test if the event counter works in Realm with NOPs execution
+ * (thin wrapper running the REALM_PMU_EVENT scenario).
+ */
+test_result_t host_realm_pmuv3_event_works(void)
+{
+ return host_test_realm_pmuv3(REALM_PMU_EVENT);
+}
+
+/*
+ * Test if Realm entering/exiting RMM preserves PMU state
+ * (thin wrapper running the REALM_PMU_PRESERVE scenario).
+ */
+test_result_t host_realm_pmuv3_rmm_preserves(void)
+{
+ return host_test_realm_pmuv3(REALM_PMU_PRESERVE);
+}
+
+/*
+ * IRQ handler for PMU_PPI #23.
+ * This handler should not be called, as RMM handles IRQs.
+ * The assert trips in debug builds if it ever fires; the -1 return is
+ * the error path for release builds where assert() compiles out.
+ */
+static int host_overflow_interrupt(void *data)
+{
+ (void)data;
+
+ assert(false);
+ return -1;
+}
+
+/*
+ * Test PMU interrupt functionality in Realm
+ *
+ * Registers a handler for PMU_PPI (which must never fire, since RMM owns
+ * the interrupt while the Realm runs), enables the PPI and runs the
+ * REALM_PMU_INTERRUPT scenario. On success the handler has already been
+ * unregistered by host_realm_handle_irq_exit(); on skip or failure the
+ * PPI is disabled and the handler unregistered here, so a stale handler
+ * is not left behind for subsequent tests.
+ */
+test_result_t host_realm_pmuv3_overflow_interrupt(void)
+{
+ test_result_t res;
+
+ /* Register PMU IRQ handler */
+ int ret = tftf_irq_register_handler(PMU_PPI, host_overflow_interrupt);
+
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to %sregister IRQ handler\n",
+ "");
+ return TEST_RESULT_FAIL;
+ }
+
+ tftf_irq_enable(PMU_PPI, GIC_HIGHEST_NS_PRIORITY);
+
+ res = host_test_realm_pmuv3(REALM_PMU_INTERRUPT);
+ if (res != TEST_RESULT_SUCCESS) {
+ /*
+ * The overflow path did not run to completion, so the handler
+ * may still be registered. Clean up; a redundant unregister
+ * just returns an error which is safe to ignore here.
+ */
+ tftf_irq_disable(PMU_PPI);
+ (void)tftf_irq_unregister_handler(PMU_PPI);
+ }
+
+ return res;
+}
+
+/*
+ * Test aim to create, enter and destroy 2 realms
+ * Host creates 2 realms with 1 rec each
+ * Host enters both rec sequentially
+ * Verifies both realm returned success
+ * Destroys both realms
+ *
+ * The two realms use disjoint page-pool regions and disjoint NS shared
+ * buffers so they can coexist.
+ */
+test_result_t host_test_multiple_realm_create_enter(void)
+{
+ bool ret1, ret2, ret3;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+ struct realm realm1, realm2;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm1, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Second realm gets its own page-pool region, right after realm1's */
+ if (!host_create_activate_realm_payload(&realm2, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE + PAGE_POOL_MAX_SIZE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ /* Fix: the destroy result was previously assigned and ignored */
+ if (!host_destroy_realm(&realm1)) {
+ ERROR("destroy failed\n");
+ }
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_create_shared_mem(&realm1, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ ret1 = false;
+ goto destroy_realms;
+ }
+
+ /* Non-overlapping shared buffer for the second realm */
+ if (!host_create_shared_mem(&realm2, NS_REALM_SHARED_MEM_BASE +
+ NS_REALM_SHARED_MEM_SIZE, NS_REALM_SHARED_MEM_SIZE)) {
+ ret1 = false;
+ goto destroy_realms;
+ }
+
+ host_shared_data_set_host_val(&realm1, 0U, HOST_ARG1_INDEX, SLEEP_TIME_MS);
+ ret1 = host_enter_realm_execute(&realm1, REALM_SLEEP_CMD, RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ goto destroy_realms;
+ }
+ host_shared_data_set_host_val(&realm2, 0U, HOST_ARG1_INDEX, SLEEP_TIME_MS);
+ ret1 = host_enter_realm_execute(&realm2, REALM_SLEEP_CMD, RMI_EXIT_HOST_CALL, 0U);
+
+destroy_realms:
+ /* Both realms exist on every path reaching this label */
+ ret2 = host_destroy_realm(&realm1);
+ ret3 = host_destroy_realm(&realm2);
+
+ if (!ret3 || !ret2) {
+ ERROR("destroy failed\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!ret1) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test set_ripas functionality in Realm
+ * Test allocates 3 PAGES and passes to Realm
+ * Realm: verifies that initial RIPAS of these pages is EMPTY
+ * Realm: requests RIPAS Change to RAM
+ * Host: attempt to change RIPAS outside requested range, verifies error generated by RMM
+ * Host: changes RIPAS of first PAGE and re-enters Realm
+ * Realm: tracks progress and requests RIPAS Change to RAM till all pages are complete
+ * Host: changes RIPAS of requested PAGE and re-enters Realm
+ * Realm: verifies all PAGES are set to RIPAS=RAM
+ */
+test_result_t host_realm_set_ripas(void)
+{
+ bool ret1, ret2;
+ u_register_t ret, base, new_base, exit_reason;
+ unsigned int host_call_result = TEST_RESULT_FAIL;
+ struct realm realm;
+ struct rmi_rec_run *run;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+ u_register_t test_page_num = 3U;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Warm-up: realm sleeps 10ms and returns via host call */
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, 10U);
+ ret1 = host_enter_realm_execute(&realm, REALM_SLEEP_CMD, RMI_EXIT_HOST_CALL, 0U);
+ /* Allocate the test pages before any goto so page_free() below is safe */
+ base = (u_register_t)page_alloc(PAGE_SIZE * test_page_num);
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG2_INDEX,
+ base + (PAGE_SIZE * test_page_num));
+
+ /* Fix: the result of the warm-up enter was previously ignored */
+ if (!ret1) {
+ ERROR("Realm sleep command failed\n");
+ goto destroy_realm;
+ }
+
+ /* Map the pages 1:1 IPA:PA as protected data (DATA_CREATE_UNKNOWN) */
+ for (unsigned int i = 0U; i < test_page_num; i++) {
+ ret = host_realm_delegate_map_protected_data(true, &realm,
+ base + (PAGE_SIZE * i), PAGE_SIZE,
+ base + (PAGE_SIZE * i));
+ if (ret != REALM_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ }
+ ret1 = host_enter_realm_execute(&realm, REALM_SET_RIPAS_CMD,
+ RMI_EXIT_RIPAS_CHANGE, 0U);
+ if (!ret1) {
+ ERROR("Rec enter failed\n");
+ goto destroy_realm;
+ }
+ run = (struct rmi_rec_run *)realm.run[0U];
+
+ /* Attempt to set ripas for IPA out of requested range, expect error */
+ ret = host_rmi_rtt_set_ripas(realm.rd,
+ realm.rec[0U],
+ run->exit.ripas_base - PAGE_SIZE,
+ run->exit.ripas_base,
+ &new_base);
+ if (ret != RMI_ERROR_INPUT || new_base != 0U) {
+ ERROR("host_rmi_rtt_set_ripas should have failed ret = 0x%lx\n", ret);
+ goto destroy_realm;
+ }
+
+ /*
+ * Service one page per RIPAS_CHANGE exit until the realm finishes and
+ * exits with a host call instead (which breaks out via the goto below).
+ */
+ while (run->exit.ripas_base <= base + (PAGE_SIZE * test_page_num)) {
+ INFO("host_rmi_rtt_set_ripas ripas_base=0x%lx\n",
+ run->exit.ripas_base);
+ ret = host_rmi_rtt_set_ripas(realm.rd,
+ realm.rec[0U],
+ run->exit.ripas_base,
+ run->exit.ripas_base + PAGE_SIZE,
+ &new_base);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_rtt_set_ripas failed ret = 0x%lx\n", ret);
+ goto destroy_realm;
+ }
+ ret = host_realm_rec_enter(&realm,
+ &exit_reason, &host_call_result, 0U);
+ if (ret != RMI_SUCCESS || exit_reason != RMI_EXIT_RIPAS_CHANGE) {
+ goto destroy_realm;
+ }
+ }
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ page_free(base);
+ return host_call_result;
+}
+
+/*
+ * Test set_ripas reject functionality in Realm
+ * Test allocates PAGE and passes to Realm
+ * Realm: verifies that initial RIPAS of page is EMPTY
+ * Realm: requests RIPAS Change to RAM
+ * Host: rejects the RIPAS change and re-enters Realm
+ * Realm: verifies REJECT response
+ * Realm: verifies PAGE has RIPAS=EMPTY
+ */
+
+test_result_t host_realm_reject_set_ripas(void)
+{
+ bool ret1, ret2;
+ u_register_t ret, exit_reason;
+ unsigned int host_call_result = TEST_RESULT_FAIL;
+ struct realm realm;
+ struct rmi_rec_run *run;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE}, base;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+
+ /* Map the page 1:1 IPA:PA as protected data (DATA_CREATE_UNKNOWN) */
+ ret = host_realm_delegate_map_protected_data(true, &realm, base, PAGE_SIZE, base);
+ if (ret != RMI_SUCCESS) {
+ /* Fix: typo "failede" in the original message */
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ ret1 = host_enter_realm_execute(&realm, REALM_REJECT_SET_RIPAS_CMD,
+ RMI_EXIT_RIPAS_CHANGE, 0U);
+
+ if (!ret1) {
+ ERROR("Rec did not request RIPAS change\n");
+ goto destroy_realm;
+ }
+ run = (struct rmi_rec_run *)realm.run[0];
+ if (run->exit.ripas_base != base) {
+ ERROR("Rec requested wrong exit.ripas_base\n");
+ goto destroy_realm;
+ }
+ /* Reject the pending RIPAS change and let the realm observe it */
+ run->entry.flags = REC_ENTRY_FLAG_RIPAS_RESPONSE_REJECT;
+ ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+ if (ret != RMI_SUCCESS || exit_reason != RMI_EXIT_HOST_CALL) {
+ /* Fix: missing newline in the original message */
+ ERROR("Re-enter rec failed exit_reason=0x%lx\n", exit_reason);
+ }
+
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_call_result;
+}
+
+/*
+ * Test aims to generate REALM Exit due to abort
+ * when access page with RIPAS=DESTROYED HIPAS=UNASSIGNED
+ * Host maps a protected page (calls data_create) when realm is in new state
+ * Initial state of PAGE is RIPAS=RAM HIPAS=ASSIGNED
+ * Host calls data_destroy, new state HIPAS=UNASSIGNED RIPAS=DESTROYED
+ * Enter Realm, Rec0 executes from page, and Rec1 reads the page
+ * Realm should trigger an Instr/Data abort, and will exit to Host.
+ * The Host verifies exit reason is Instr/Data abort
+ */
+test_result_t host_realm_abort_unassigned_destroyed(void)
+{
+ bool ret1, ret2;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t ret, data, top;
+ struct realm realm;
+ struct rmi_rec_run *run;
+ struct rtt_entry rtt;
+ u_register_t rec_flag[2U] = {RMI_RUNNABLE, RMI_RUNNABLE}, base;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ /* Realm is left in NEW state here; activated only after data_destroy */
+ if (!host_create_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 2U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+
+ run = (struct rmi_rec_run *)realm.run[0];
+
+ /* DATA_CREATE
+ * Copies content of TFTF_BASE in newly created page, any PA can be used for dummy copy
+ * maps 1:1 IPA:PA
+ */
+ ret = host_realm_delegate_map_protected_data(false, &realm, base, PAGE_SIZE, TFTF_BASE);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ /* NOTE(review): message typo — "DATA_CRATE_UNKNOWN" should read
+ * "DATA_CREATE" (the comment above shows the intended operation). */
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_RAM)) {
+ ERROR("wrong state after DATA_CRATE_UNKNOWN\n");
+ goto undelegate_destroy;
+ }
+ INFO("Initial state base = 0x%lx rtt.state=0x%lx rtt.ripas=0x%lx\n",
+ base, rtt.state, rtt.ripas);
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base);
+
+ /* Transition the page to HIPAS=UNASSIGNED RIPAS=DESTROYED */
+ ret = host_rmi_data_destroy(realm.rd, base, &data, &top);
+ if (ret != RMI_SUCCESS || data != base) {
+ ERROR("host_rmi_data_destroy failed\n");
+ goto undelegate_destroy;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ rtt.ripas != RMI_DESTROYED) {
+ ERROR("Wrong state after host_rmi_data_destroy\n");
+ goto undelegate_destroy;
+ }
+
+ INFO("New state4 base = 0x%lx rtt.state=0x%lx rtt.ripas=0x%lx\n",
+ base, rtt.state, rtt.ripas);
+
+ if (host_realm_activate(&realm) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_activate");
+ goto undelegate_destroy;
+ }
+
+ /* Realm0 expect rec exit due to Instr Abort unassigned destroyed page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_SYNC, 0U);
+
+ /* ESR.EC == 0b100000 Instruction Abort from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_IABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_IFSC_MASK) < IFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_IFSC_MASK) > IFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault ESR=0x%lx\n", run->exit.esr);
+ goto undelegate_destroy;
+ }
+ INFO("IA FAR=0x%lx, HPFAR=0x%lx ESR=0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+
+ run = (struct rmi_rec_run *)realm.run[1];
+
+ /* Realm1 expect rec exit due to Data Abort unassigned destroyed page */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_SYNC, 1U);
+
+ /* ESR.EC == 0b100100 Data Abort exception from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_DABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_DFSC_MASK) < DFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_DFSC_MASK) > DFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault\n");
+ goto undelegate_destroy;
+ }
+ INFO("DA FAR=0x%lx, HPFAR=0x%lx ESR= 0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+ res = TEST_RESULT_SUCCESS;
+
+undelegate_destroy:
+ /* Best-effort cleanup; undelegate result intentionally not checked */
+ ret = host_rmi_granule_undelegate(base);
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
+
+/*
+ * Test aims to generate REALM Exit due to Abort
+ * when access page with RIPAS=RAM HIPAS=UNASSIGNED
+ * Host allocates a PAGE, calls init_ripas when realm is in new state
+ * Initial state of PAGE is RIPAS=RAM HIPAS=UNASSIGNED
+ * Enter Realm, REC0 executes from page, and REC1 reads the page
+ * Realm should trigger an Instr/Data abort, and will exit to Host.
+ * Host verifies exit reason is Instr/Data abort.
+ */
+test_result_t host_realm_abort_unassigned_ram(void)
+{
+ bool ret1, ret2;
+ u_register_t ret, top;
+ struct realm realm;
+ struct rmi_rec_run *run;
+ struct rtt_entry rtt;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t rec_flag[2U] = {RMI_RUNNABLE, RMI_RUNNABLE}, base;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 2U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ /* This is dummy allocation to get a base address */
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+
+ run = (struct rmi_rec_run *)realm.run[0];
+
+ /* Set RIPAS of PAGE to RAM */
+ ret = host_rmi_rtt_init_ripas(realm.rd, base, base + PAGE_SIZE, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx line=%u\n",
+ "host_rmi_rtt_init_ripas", ret, __LINE__);
+ goto destroy_realm;
+ }
+ if (host_realm_activate(&realm) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_activate");
+ goto destroy_realm;
+ }
+
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ (rtt.ripas != RMI_RAM)) {
+ ERROR("wrong initial state\n");
+ goto destroy_realm;
+ }
+ INFO("Initial state base = 0x%lx rtt.state=0x%lx rtt.ripas=0x%lx\n",
+ base, rtt.state, rtt.ripas);
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base);
+
+ /* Rec0 expect rec exit due to Instr Abort unassigned ram page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_SYNC, 0U);
+
+ /* ESR.EC == 0b100000 Instruction Abort from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_IABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_IFSC_MASK) < IFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_IFSC_MASK) > IFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault ESR=0x%lx\n", run->exit.esr);
+ goto destroy_realm;
+ }
+ INFO("IA FAR=0x%lx, HPFAR=0x%lx ESR=0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+ run = (struct rmi_rec_run *)realm.run[1];
+
+ /* Rec1 expect rec exit due to Data Abort unassigned ram page */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_SYNC, 1U);
+
+ /* ESR.EC == 0b100100 Data Abort exception from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_DABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_DFSC_MASK) < DFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_DFSC_MASK) > DFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault ESR=0x%lx\n", run->exit.esr);
+ goto destroy_realm;
+ }
+ INFO("DA FAR=0x%lx, HPFAR=0x%lx ESR=0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+ res = TEST_RESULT_SUCCESS;
+
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
+
+/*
+ * Test aims to generate REALM Exit due to Abort
+ * when access page with RIPAS=DESTROYED HIPAS=Assigned
+ * Host maps a protected page (calls data_create) when realm is in new state
+ * initial state of PAGE is RIPAS=RAM HIPAS=ASSIGNED
+ * Host calls data_destroy, new state HIPAS=UNASSIGNED RIPAS=DESTROYED
+ * Host calls data_create_unknown, new state HIPAS=ASSIGNED RIPAS=DESTROYED
+ * Enter Realm, REC0 executes from page, and REC1 reads the page
+ * Realm should trigger an Instr/Data abort, and will exit to Host.
+ * The Host verifies exit reason is Instr/Data abort
+ */
+test_result_t host_realm_abort_assigned_destroyed(void)
+{
+ bool ret1, ret2;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t ret, top, data;
+ struct realm realm;
+ struct rmi_rec_run *run;
+ struct rtt_entry rtt;
+ u_register_t rec_flag[2U] = {RMI_RUNNABLE, RMI_RUNNABLE}, base;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ /* Realm is left in NEW state; activated after the DATA_CREATE below */
+ if (!host_create_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 2U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+ run = (struct rmi_rec_run *)realm.run[0];
+
+ /* DATA_CREATE */
+ /* Copied content of TFTF_BASE to new page, can use any adr, maps 1:1 IPA:PA */
+ ret = host_realm_delegate_map_protected_data(false, &realm, base, PAGE_SIZE, TFTF_BASE);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_RAM)) {
+ ERROR("wrong state after data create\n");
+ goto destroy_realm;
+ }
+ INFO("Initial state base = 0x%lx rtt.state=0x%lx rtt.ripas=0x%lx\n",
+ base, rtt.state, rtt.ripas);
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base);
+
+ if (host_realm_activate(&realm) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_activate");
+ goto destroy_realm;
+ }
+
+ /* HIPAS=UNASSIGNED RIPAS=DESTROYED */
+ ret = host_rmi_data_destroy(realm.rd, base, &data, &top);
+ if (ret != RMI_SUCCESS || data != base) {
+ ERROR("host_rmi_data_destroy failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ rtt.ripas != RMI_DESTROYED) {
+ ERROR("Wrong state after host_rmi_data_destroy\n");
+ goto destroy_realm;
+ }
+ /* Undelegate so the granule can be re-delegated for DATA_CREATE_UNKNOWN */
+ ret = host_rmi_granule_undelegate(base);
+
+ /* DATA_CREATE_UNKNOWN: HIPAS=ASSIGNED RIPAS=DESTROYED */
+ /* NOTE(review): message typo below — "failede" should read "failed" */
+ ret = host_realm_delegate_map_protected_data(true, &realm, base, PAGE_SIZE, 0U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failede\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_DESTROYED)) {
+ ERROR("wrong state after data create unknown\n");
+ goto destroy_data;
+ }
+
+ /* Rec0, expect rec exit due to Instr Abort assigned destroyed page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_SYNC, 0U);
+
+ /* ESR.EC == 0b100000 Instruction Abort from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_IABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_IFSC_MASK) < IFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_IFSC_MASK) > IFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault ESR=0x%lx\n", run->exit.esr);
+ goto destroy_data;
+ }
+ INFO("IA FAR=0x%lx, HPFAR=0x%lx ESR=0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+ run = (struct rmi_rec_run *)realm.run[1];
+
+ /* Rec1 expect rec exit due to Data Abort assigned destroyed page */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_SYNC, 1U);
+
+ /* ESR.EC == 0b100100 Data Abort exception from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_DABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_DFSC_MASK) < DFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_DFSC_MASK) > DFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault ESR=0x%lx\n", run->exit.esr);
+ goto destroy_data;
+ }
+ INFO("DA FAR=0x%lx, HPFAR=0x%lx ESR=0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+ res = TEST_RESULT_SUCCESS;
+
+destroy_data:
+ /* Best-effort cleanup; results intentionally not checked */
+ ret = host_rmi_data_destroy(realm.rd, base, &data, &top);
+ ret = host_rmi_granule_undelegate(base);
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
+
+/*
+ * Test aims to generate SEA in Realm by accessing
+ * PAGE with HIPAS=assigned/unassigned and RIPAS=EMPTY
+ * Host creates and executes 4 recs to generate SEA
+ * Rec exception handler runs and returns back ESR to Host
+ * Host validates ESR
+ * Rec0 generated IA unassigned empty
+ * Rec1 generated DA unassigned empty
+ * Rec2 generated IA for assigned empty
+ * Rec3 generated DA for assigned empty
+ */
+test_result_t host_realm_sea_empty(void)
+{
+ bool ret1, ret2;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t ret, base, esr;
+ struct realm realm;
+ struct rtt_entry rtt;
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE};
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 4U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (rtt.state != RMI_UNASSIGNED ||
+ (rtt.ripas != RMI_EMPTY)) {
+ ERROR("wrong initial state\n");
+ goto destroy_realm;
+ }
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 2U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 3U, HOST_ARG1_INDEX, base);
+
+ /* Rec0 expect IA due to SEA unassigned empty page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ ERROR("Rec0 did not fault\n");
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 0U, HOST_ARG2_INDEX);
+ if (((esr & ISS_IFSC_MASK) != IFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_IABORT_CUR_EL)) {
+ ERROR("Rec0 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec0 ESR=0x%lx\n", esr);
+
+ /* Rec1 expect DA due to SEA unassigned empty page */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_HOST_CALL, 1U);
+ if (!ret1) {
+ ERROR("Rec1 did not fault\n");
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 1U, HOST_ARG2_INDEX);
+ if (((esr & ISS_DFSC_MASK) != DFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_DABORT_CUR_EL)) {
+ ERROR("Rec1 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec1 ESR=0x%lx\n", esr);
+
+ /* DATA_CREATE_UNKNOWN */
+ ret = host_realm_delegate_map_protected_data(true, &realm, base, PAGE_SIZE, 0U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_EMPTY)) {
+ ERROR("wrong state after DATA_CRATE_UNKNOWN\n");
+ goto undelegate_destroy;
+ }
+ INFO("state base = 0x%lx rtt.state=0x%lx rtt.ripas=0x%lx\n",
+ base, rtt.state, rtt.ripas);
+
+ /* Rec2 expect IA due to SEA assigned empty page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_HOST_CALL, 2U);
+
+ if (!ret1) {
+ ERROR("Rec2 did not fault\n");
+ goto undelegate_destroy;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 2U, HOST_ARG2_INDEX);
+ if (((esr & ISS_IFSC_MASK) != IFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_IABORT_CUR_EL)) {
+ ERROR("Rec2 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec2 ESR=0x%lx\n", esr);
+
+ /* Rec3 expect DA due to SEA assigned empty page */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_HOST_CALL, 3U);
+ if (!ret1) {
+ ERROR("Rec3 did not fault\n");
+ goto undelegate_destroy;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 3U, HOST_ARG2_INDEX);
+ if (((esr & ISS_DFSC_MASK) != DFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_DABORT_CUR_EL)) {
+ ERROR("Rec3 incorrect ESR=0x%lx\n", esr);
+ }
+ INFO("Rec3 ESR=0x%lx\n", esr);
+ res = TEST_RESULT_SUCCESS;
+
+undelegate_destroy:
+ ret = host_rmi_granule_undelegate(base);
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
+
+/*
+ * Test aims to generate SEA in Realm by
+ * executing instructions in unprotected IPA - Rec0
+ * In Rec1, when HIPAS=UNASSIGNED_NS, we expect to get a Data abort.
+ * Then Host will inject SEA to realm.
+ * Realm exception handler runs and returns ESR back to Host
+ * Host validates ESR
+ */
+test_result_t host_realm_sea_unprotected(void)
+{
+
+ bool ret1, ret2;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t ret, base, base_ipa, esr;
+ unsigned int host_call_result;
+ u_register_t exit_reason;
+ struct realm realm;
+ struct rtt_entry rtt;
+ struct rmi_rec_run *run;
+ u_register_t rec_flag[2U] = {RMI_RUNNABLE, RMI_RUNNABLE};
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 2U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ /* Can choose any unprotected IPA address, TFTF_BASE chosen for convenience */
+ base = TFTF_BASE;
+ base_ipa = base | (1UL << (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm.rmm_feat_reg0) - 1U));
+
+
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (rtt.state != RMI_UNASSIGNED) {
+ ERROR("wrong state\n");
+ goto destroy_realm;
+ }
+
+ run = (struct rmi_rec_run *)realm.run[0];
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base_ipa);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base_ipa);
+
+ /* Rec0 expect SEA in realm due to IA unprotected IPA page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ ERROR("Rec0 did not fault\n");
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 0U, HOST_ARG2_INDEX);
+ if (((esr & ISS_IFSC_MASK) != IFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_IABORT_CUR_EL)) {
+ ERROR("Rec0 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec0 ESR=0x%lx\n", esr);
+
+ run = (struct rmi_rec_run *)realm.run[1U];
+
+ /* Rec1 expect rec exit due to DA unprotected IPA page when HIPAS is UNASSIGNED_NS */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_SYNC, 1U);
+
+ if (!ret1 || (run->exit.hpfar >> 4U) != (base_ipa >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_DABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_DFSC_MASK) < DFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_DFSC_MASK) > DFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U)) {
+ ERROR("Rec1 did not fault exit=0x%lx ret1=%d HPFAR=0x%lx esr=0x%lx\n",
+ run->exit.exit_reason, ret1, run->exit.hpfar, run->exit.esr);
+ goto destroy_realm;
+ }
+ INFO("Host DA FAR=0x%lx, HPFAR=0x%lx\n", run->exit.far, run->exit.hpfar);
+ INFO("Injecting SEA to Realm\n");
+
+ /* Inject SEA back to Realm */
+ run->entry.flags = REC_ENTRY_FLAG_INJECT_SEA;
+
+ /* Rec1 re-entry expect exception handler to run, return ESR */
+ ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 1U);
+ if (ret != RMI_SUCCESS || exit_reason != RMI_EXIT_HOST_CALL) {
+ ERROR("rec1 failed ret=0x%lx exit_reason=0x%lx", ret, run->exit.exit_reason);
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 1U, HOST_ARG2_INDEX);
+ if (((esr & ISS_DFSC_MASK) != DFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_DABORT_CUR_EL)) {
+ ERROR("Rec1 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec1 ESR=0x%lx\n", esr);
+ res = host_call_result;
+
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
+
+/*
+ * @Test_Aim@ Test to check if DIT bit is preserved across NS/RL switch
+ */
+test_result_t host_realm_enable_dit(void)
+{
+ bool ret1, ret2;
+ struct realm realm;
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+ RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE}, dit;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, MAX_REC_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Enable FEAT_DIT on Host */
+ write_dit(DIT_BIT);
+ for (unsigned int i = 0; i < MAX_REC_COUNT; i++) {
+ host_shared_data_set_host_val(&realm, i, HOST_ARG1_INDEX, 10U);
+ ret1 = host_enter_realm_execute(&realm, REALM_DIT_CHECK_CMD,
+ RMI_EXIT_HOST_CALL, i);
+ if (!ret1) {
+ break;
+ }
+ }
+
+ ret2 = host_destroy_realm(&realm);
+
+ dit = read_dit();
+ if (dit != DIT_BIT) {
+ ERROR("Host DIT bit not preserved\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ write_dit(0U);
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/realm_payload/host_realm_spm.c b/tftf/tests/runtime_services/realm_payload/host_realm_spm.c
new file mode 100644
index 000000000..51b87e7b1
--- /dev/null
+++ b/tftf/tests/runtime_services/realm_payload/host_realm_spm.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <fpu.h>
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_shared_data.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+
+#define REALM_TIME_SLEEP 300U
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
+static struct mailbox_buffers mb;
+static bool secure_mailbox_initialised;
+
+static fpu_state_t ns_fpu_state_write;
+static fpu_state_t ns_fpu_state_read;
+static struct realm realm;
+
+typedef enum security_state {
+ NONSECURE_WORLD = 0U,
+ REALM_WORLD,
+ SECURE_WORLD,
+ SECURITY_STATE_MAX
+} security_state_t;
+
+/*
+ * This function helps to Initialise secure_mailbox, creates realm payload and
+ * shared memory to be used between Host and Realm.
+ * Skip the test if RME is not supported or the right RMM version is not being used
+ */
+static test_result_t init_sp(void)
+{
+ if (!secure_mailbox_initialised) {
+ GET_TFTF_MAILBOX(mb);
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ secure_mailbox_initialised = true;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t init_realm(void)
+{
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+
+ /*
+ * Initialise Realm payload
+ */
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE, 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Create shared memory between Host and Realm
+ */
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+static bool host_realm_handle_fiq_exit(struct realm *realm_ptr,
+ unsigned int rec_num)
+{
+ struct rmi_rec_run *run = (struct rmi_rec_run *)realm_ptr->run[rec_num];
+ if (run->exit.exit_reason == RMI_EXIT_FIQ) {
+ return true;
+ }
+ return false;
+}
+
+/* Send request to SP to fill FPU/SIMD regs with secure template values */
+static bool fpu_fill_sec(void)
+{
+ struct ffa_value ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ return true;
+}
+
+/* Send request to SP to compare FPU/SIMD regs with secure template values */
+static bool fpu_cmp_sec(void)
+{
+ struct ffa_value ret = cactus_req_simd_compare_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ return true;
+}
+
+
+/* Send request to Realm to fill FPU/SIMD regs with realm template values */
+static bool fpu_fill_rl(void)
+{
+ if (!host_enter_realm_execute(&realm, REALM_REQ_FPU_FILL_CMD, RMI_EXIT_HOST_CALL, 0U)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ return true;
+}
+
+/* Send request to Realm to compare FPU/SIMD regs with previous realm template values */
+static bool fpu_cmp_rl(void)
+{
+ if (!host_enter_realm_execute(&realm, REALM_REQ_FPU_CMP_CMD, RMI_EXIT_HOST_CALL, 0U)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * @Test_Aim@ Test secure interrupt handling while Secure Partition is in waiting
+ * state and Realm world runs a busy loop at R-EL1.
+ *
+ * 1. Send a direct message request command to first Cactus SP to start the
+ * trusted watchdog timer.
+ *
+ * 2. Once the SP returns with a direct response message, it moves to WAITING
+ * state.
+ *
+ * 3. Create and execute a busy loop to sleep the PE in the realm world for
+ * REALM_TIME_SLEEP ms.
+ *
+ * 4. Trusted watchdog timer expires during this time which leads to secure
+ * interrupt being triggered while cpu is executing in realm world.
+ *
+ * 5. Realm EL1 exits to host, but because the FIQ is still pending,
+ * the Host will be pre-empted to EL3.
+ *
+ * 6. The interrupt is trapped to BL31/SPMD as FIQ and later synchronously
+ * delivered to SPM.
+ *
+ * 7. SPM injects a virtual IRQ to first Cactus Secure Partition.
+ *
+ * 8. Once the SP has handled the interrupt, it returns execution back to normal
+ * world using FFA_MSG_WAIT call.
+ *
+ * 9. TFTF parses REC's exit reason (FIQ in this case).
+ *
+ * 10. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 12. Further, TFTF expects SP to return the ID of Trusted Watchdog timer
+ * interrupt through a direct response message.
+ *
+ * 13. Test finishes successfully once the TFTF disables the trusted watchdog
+ * interrupt through a direct message request command.
+ *
+ * 14. TFTF then proceed to destroy the Realm.
+ *
+ */
+test_result_t host_realm_sec_interrupt_can_preempt_rl(void)
+{
+ struct ffa_value ret_values;
+ test_result_t res;
+
+ /* Verify RME is present and RMM is not TRP */
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ /* Verify that FFA is there and that it has the correct version. */
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ res = init_sp();
+ if (res != TEST_RESULT_SUCCESS) {
+ return res;
+ }
+
+ res = init_realm();
+ if (res != TEST_RESULT_SUCCESS) {
+ return res;
+ }
+
+ /* Enable trusted watchdog interrupt as IRQ in the secure side. */
+ if (!enable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ goto destroy_realm;
+ }
+
+ /*
+ * Send a message to SP1 through direct messaging.
+ */
+ ret_values = cactus_send_twdog_cmd(SENDER, RECEIVER,
+ (REALM_TIME_SLEEP/2));
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for starting TWDOG timer\n");
+ goto destroy_realm;
+ }
+
+ /*
+ * Spin Realm payload for REALM_TIME_SLEEP ms, This ensures secure wdog
+ * timer triggers during this time.
+ */
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, REALM_TIME_SLEEP);
+ host_enter_realm_execute(&realm, REALM_SLEEP_CMD, RMI_EXIT_FIQ, 0U);
+
+ /*
+ * Check if Realm exit reason is FIQ.
+ */
+ if (!host_realm_handle_fiq_exit(&realm, 0U)) {
+ ERROR("Trusted watchdog timer interrupt not fired\n");
+ goto destroy_realm;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ goto destroy_realm;
+ }
+
+ /* Make sure Trusted Watchdog timer interrupt was serviced*/
+ if (cactus_get_response(ret_values) != IRQ_TWDOG_INTID) {
+ ERROR("Trusted watchdog timer interrupt not serviced by SP\n");
+ goto destroy_realm;
+ }
+
+ /* Disable Trusted Watchdog interrupt. */
+ if (!disable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ goto destroy_realm;
+ }
+
+ if (!host_destroy_realm(&realm)) {
+ ERROR("host_destroy_realm error\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+
+destroy_realm:
+ host_destroy_realm(&realm);
+ return TEST_RESULT_FAIL;
+}
+
+/* Choose a random security state that is different from the 'current' state */
+static security_state_t get_random_security_state(security_state_t current,
+ bool is_sp_present)
+{
+ security_state_t next;
+
+ /*
+ * 3 world config: Switch between NS world and Realm world as Secure
+ * world is not enabled or SP is not loaded.
+ */
+ if (!is_sp_present) {
+ if (current == NONSECURE_WORLD) {
+ return REALM_WORLD;
+ } else {
+ return NONSECURE_WORLD;
+ }
+ }
+
+ /*
+ * 4 world config: Randomly select a security_state between Realm, NS
+ * and Secure until the new state is not equal to the current state.
+ */
+ while (true) {
+ next = rand() % SECURITY_STATE_MAX;
+ if (next == current) {
+ continue;
+ }
+
+ break;
+ }
+
+ return next;
+}
+
+/*
+ * Test whether FPU/SIMD state (32 SIMD vectors, FPCR and FPSR registers) are
+ * preserved during a random context switch between Secure/Non-Secure/Realm world
+ *
+ * Below steps are performed by this test:
+ *
+ * Init:
+ * Fill FPU registers with random values in
+ * 1. NS World (NS-EL2)
+ * 2. Realm world (R-EL1)
+ * 3. Secure world (S-EL1) (if SP loaded)
+ *
+ * Test loop:
+ * security_state_next = get_random_security_state(current, is_sp_present)
+ *
+ * switch to security_state_next
+ * if (FPU registers read != last filled values)
+ * break loop; return TC_FAIL
+ *
+ * Fill FPU registers with new random values for the next comparison.
+ */
+test_result_t host_realm_fpu_access_in_rl_ns_se(void)
+{
+ security_state_t sec_state;
+ bool is_sp_present;
+ test_result_t res;
+
+ /* Verify RME is present and RMM is not TRP */
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ /* Verify that FFA is there and that it has the correct version. */
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ res = init_realm();
+ if (res != TEST_RESULT_SUCCESS) {
+ return res;
+ }
+
+ /* Fill FPU registers in Non-secure world */
+ fpu_state_write_rand(&ns_fpu_state_write);
+
+ /* Fill FPU registers in Realm world */
+ if (!fpu_fill_rl()) {
+ ERROR("fpu_fill_rl error\n");
+ goto destroy_realm;
+ }
+ sec_state = REALM_WORLD;
+
+ /* Fill FPU registers in Secure world if present */
+ res = init_sp();
+ if (res == TEST_RESULT_SUCCESS) {
+ if (!fpu_fill_sec()) {
+ ERROR("fpu_fill_sec error\n");
+ goto destroy_realm;
+ }
+
+ sec_state = SECURE_WORLD;
+ is_sp_present = true;
+ } else {
+ is_sp_present = false;
+ }
+
+ for (uint32_t i = 0; i < 128; i++) {
+ sec_state = get_random_security_state(sec_state, is_sp_present);
+
+ switch (sec_state) {
+ case NONSECURE_WORLD:
+ /* NS world verify its FPU/SIMD state registers */
+ fpu_state_read(&ns_fpu_state_read);
+ if (fpu_state_compare(&ns_fpu_state_write,
+ &ns_fpu_state_read)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ goto destroy_realm;
+ }
+
+ /* Fill FPU state with new random values in NS world */
+ fpu_state_write_rand(&ns_fpu_state_write);
+ break;
+ case REALM_WORLD:
+ /* Realm world verify its FPU/SIMD state registers */
+ if (!fpu_cmp_rl()) {
+ goto destroy_realm;
+ }
+
+ /* Fill FPU state with new random values in Realm */
+ if (!fpu_fill_rl()) {
+ goto destroy_realm;
+ }
+
+ break;
+ case SECURE_WORLD:
+ /* Secure world verify its FPU/SIMD state registers */
+ if (!fpu_cmp_sec()) {
+ goto destroy_realm;
+ }
+
+ /* Fill FPU state with new random values in SP */
+ if (!fpu_fill_sec()) {
+ goto destroy_realm;
+
+ }
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!host_destroy_realm(&realm)) {
+ ERROR("host_destroy_realm error\n");
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+destroy_realm:
+ host_destroy_realm(&realm);
+ return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/runtime_services/secure_service/aarch32/ffa_arch_helpers.S b/tftf/tests/runtime_services/secure_service/aarch32/ffa_arch_helpers.S
new file mode 100644
index 000000000..0d5395fa1
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/aarch32/ffa_arch_helpers.S
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .macro service_call _conduit
+ /* Push r9 to keep the stack pointer aligned to 64 bit. */
+ push {r4 - r9}
+
+ /*
+ * Save the ffa_value pointer in a callee saved register.
+ */
+ mov r8, r0
+
+ /* Load the argument values into the appropriate registers. */
+ ldm r0, {r0 - r7}
+
+ \_conduit #0
+
+ /*
+ * The return values are stored in r0-r7, put them in the ffa_value
+ * return structure.
+ */
+ stm r8, {r0 - r7}
+
+ pop {r4 - r9}
+ .endm
+
+.globl ffa_svc
+func ffa_svc
+ service_call svc
+ bx lr
+endfunc ffa_svc
+
+.globl ffa_smc
+func ffa_smc
+ service_call smc
+ bx lr
+endfunc ffa_smc
diff --git a/tftf/tests/runtime_services/secure_service/aarch64/ffa_arch_helpers.S b/tftf/tests/runtime_services/secure_service/aarch64/ffa_arch_helpers.S
new file mode 100644
index 000000000..b9c9cd9fd
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/aarch64/ffa_arch_helpers.S
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .macro service_call _conduit
+ /*
+ * Use a callee saved register to point to ffa_value structure after
+ * returning from the conduit.
+ * Although x19 contains an 8-byte value, we are allocating 16 bytes
+ * on the stack to respect the 16-byte stack-alignment.
+ */
+ str x19, [sp, #-16]!
+
+ /*
+ * Save pointed to ffa_value structure into x19, which is a callee saved
+ * register.
+ */
+ mov x19, x0
+ /* Load the argument values into the appropriate registers. */
+ ldp x16, x17, [x0, #128]
+ ldp x14, x15, [x0, #112]
+ ldp x12, x13, [x0, #96]
+ ldp x10, x11, [x0, #80]
+ ldp x8, x9, [x0, #64]
+ ldp x6, x7, [x0, #48]
+ ldp x4, x5, [x0, #32]
+ ldp x2, x3, [x0, #16]
+ ldp x0, x1, [x0, #0]
+
+ \_conduit #0
+
+ /*
+ * The return values are stored in x0-x17, put them in the ffa_value
+ * return structure. x19 points to the ffa_value structure.
+ */
+ stp x0, x1, [x19, #0]
+ stp x2, x3, [x19, #16]
+ stp x4, x5, [x19, #32]
+ stp x6, x7, [x19, #48]
+ stp x8, x9, [x19, #64]
+ stp x10, x11, [x19, #80]
+ stp x12, x13, [x19, #96]
+ stp x14, x15, [x19, #112]
+ stp x16, x17, [x19, #128]
+ ldr x19, [sp], #16
+ .endm
+
+.globl ffa_svc
+func ffa_svc
+ service_call svc
+ ret
+endfunc ffa_svc
+
+.globl ffa_smc
+func ffa_smc
+ service_call smc
+ ret
+endfunc ffa_smc
diff --git a/tftf/tests/runtime_services/secure_service/ffa_helpers.c b/tftf/tests/runtime_services/secure_service/ffa_helpers.c
index 8e7b58c6f..8b53bb001 100644
--- a/tftf/tests/runtime_services/secure_service/ffa_helpers.c
+++ b/tftf/tests/runtime_services/secure_service/ffa_helpers.c
@@ -1,13 +1,24 @@
/*
- * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <smccc.h>
+#include <assert.h>
#include <ffa_endpoints.h>
#include <ffa_helpers.h>
#include <ffa_svc.h>
+#include <smccc.h>
+
+struct ffa_value ffa_service_call(struct ffa_value *args)
+{
+#if IMAGE_IVY
+ ffa_svc(args);
+#else
+ ffa_smc(args);
+#endif
+ return *args;
+}
/*-----------------------------------------------------------------------------
* FFA_RUN
@@ -26,15 +37,15 @@
* -BUSY: vCPU is busy and caller must retry later
* -ABORTED: vCPU or VM ran into an unexpected error and has aborted
*/
-smc_ret_values ffa_run(uint32_t dest_id, uint32_t vcpu_id)
+struct ffa_value ffa_run(uint32_t dest_id, uint32_t vcpu_id)
{
- smc_args args = {
- FFA_MSG_RUN,
+ struct ffa_value args = {
+ FFA_RUN,
(dest_id << 16) | vcpu_id,
0, 0, 0, 0, 0, 0
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/*-----------------------------------------------------------------------------
@@ -55,12 +66,12 @@ smc_ret_values ffa_run(uint32_t dest_id, uint32_t vcpu_id)
* -BUSY: Message target is busy
* -ABORTED: Message target ran into an unexpected error and has aborted
*/
-smc_ret_values ffa_msg_send_direct_req64(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint64_t arg0,
- uint64_t arg1, uint64_t arg2,
- uint64_t arg3, uint64_t arg4)
+struct ffa_value ffa_msg_send_direct_req64(ffa_id_t source_id,
+ ffa_id_t dest_id, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MSG_SEND_DIRECT_REQ_SMC64,
.arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
.arg2 = 0,
@@ -71,15 +82,15 @@ smc_ret_values ffa_msg_send_direct_req64(ffa_vm_id_t source_id,
.arg7 = arg4,
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_msg_send_direct_req32(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint32_t arg0,
- uint32_t arg1, uint32_t arg2,
- uint32_t arg3, uint32_t arg4)
+struct ffa_value ffa_msg_send_direct_req32(ffa_id_t source_id,
+ ffa_id_t dest_id, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3, uint32_t arg4)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MSG_SEND_DIRECT_REQ_SMC32,
.arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
.arg2 = 0,
@@ -90,15 +101,15 @@ smc_ret_values ffa_msg_send_direct_req32(ffa_vm_id_t source_id,
.arg7 = arg4,
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_msg_send_direct_resp64(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint64_t arg0,
- uint64_t arg1, uint64_t arg2,
- uint64_t arg3, uint64_t arg4)
+struct ffa_value ffa_msg_send_direct_resp64(ffa_id_t source_id,
+ ffa_id_t dest_id, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MSG_SEND_DIRECT_RESP_SMC64,
.arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
.arg2 = 0,
@@ -109,15 +120,15 @@ smc_ret_values ffa_msg_send_direct_resp64(ffa_vm_id_t source_id,
.arg7 = arg4,
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_msg_send_direct_resp32(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint32_t arg0,
- uint32_t arg1, uint32_t arg2,
- uint32_t arg3, uint32_t arg4)
+struct ffa_value ffa_msg_send_direct_resp32(ffa_id_t source_id,
+ ffa_id_t dest_id, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3, uint32_t arg4)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MSG_SEND_DIRECT_RESP_SMC32,
.arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
.arg2 = 0,
@@ -128,85 +139,64 @@ smc_ret_values ffa_msg_send_direct_resp32(ffa_vm_id_t source_id,
.arg7 = arg4,
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-
-/**
- * Initialises the header of the given `ffa_memory_region`, not including the
- * composite memory region offset.
- */
-static void ffa_memory_region_init_header(
- struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
- ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
- ffa_memory_handle_t handle, uint32_t tag, ffa_vm_id_t receiver,
- ffa_memory_access_permissions_t permissions)
+void ffa_memory_region_init_header(struct ffa_memory_region *memory_region,
+ ffa_id_t sender,
+ ffa_memory_attributes_t attributes,
+ ffa_memory_region_flags_t flags,
+ ffa_memory_handle_t handle, uint32_t tag,
+ uint32_t receiver_count)
{
memory_region->sender = sender;
memory_region->attributes = attributes;
- memory_region->reserved_0 = 0;
memory_region->flags = flags;
memory_region->handle = handle;
memory_region->tag = tag;
- memory_region->reserved_1 = 0;
- memory_region->receiver_count = 1;
- memory_region->receivers[0].receiver_permissions.receiver = receiver;
- memory_region->receivers[0].receiver_permissions.permissions =
- permissions;
- memory_region->receivers[0].receiver_permissions.flags = 0;
- memory_region->receivers[0].reserved_0 = 0;
+ memory_region->memory_access_desc_size =
+ sizeof(struct ffa_memory_access);
+ memory_region->receiver_count = receiver_count;
+ memory_region->receivers_offset =
+ offsetof(struct ffa_memory_region, receivers);
+ memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
}
/**
- * Initialises the given `ffa_memory_region` and copies as many as possible of
- * the given constituents to it.
+ * Copies as many as possible of the given constituents to the respective
+ * memory region and sets the respective offset.
*
* Returns the number of constituents remaining which wouldn't fit, and (via
* return parameters) the size in bytes of the first fragment of data copied to
* `memory_region` (attributes, constituents and memory region header size), and
* the total size of the memory sharing message including all constituents.
*/
-uint32_t ffa_memory_region_init(
+static uint32_t ffa_memory_region_init_constituents(
struct ffa_memory_region *memory_region, size_t memory_region_max_size,
- ffa_vm_id_t sender, ffa_vm_id_t receiver,
const struct ffa_memory_region_constituent constituents[],
- uint32_t constituent_count, uint32_t tag,
- ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
- enum ffa_instruction_access instruction_access,
- enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
- enum ffa_memory_shareability shareability, uint32_t *total_length,
+ uint32_t constituent_count, uint32_t *total_length,
uint32_t *fragment_length)
{
- ffa_memory_access_permissions_t permissions = 0;
- ffa_memory_attributes_t attributes = 0;
struct ffa_composite_memory_region *composite_memory_region;
uint32_t fragment_max_constituents;
- uint32_t count_to_copy;
- uint32_t i;
uint32_t constituents_offset;
+ uint32_t count_to_copy;
- /* Set memory region's permissions. */
- ffa_set_data_access_attr(&permissions, data_access);
- ffa_set_instruction_access_attr(&permissions, instruction_access);
-
- /* Set memory region's page attributes. */
- ffa_set_memory_type_attr(&attributes, type);
- ffa_set_memory_cacheability_attr(&attributes, cacheability);
- ffa_set_memory_shareability_attr(&attributes, shareability);
-
- ffa_memory_region_init_header(memory_region, sender, attributes, flags,
- 0, tag, receiver, permissions);
/*
* Note that `sizeof(struct_ffa_memory_region)` and `sizeof(struct
* ffa_memory_access)` must both be multiples of 16 (as verified by the
* asserts in `ffa_memory.c`, so it is guaranteed that the offset we
* calculate here is aligned to a 64-bit boundary and so 64-bit values
* can be copied without alignment faults.
+ * If there are multiple receiver endpoints, their respective access
+ * structure should point to the same offset value.
*/
- memory_region->receivers[0].composite_memory_region_offset =
- sizeof(struct ffa_memory_region) +
- memory_region->receiver_count *
- sizeof(struct ffa_memory_access);
+ for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
+ memory_region->receivers[i].composite_memory_region_offset =
+ sizeof(struct ffa_memory_region) +
+ memory_region->receiver_count *
+ sizeof(struct ffa_memory_access);
+ }
composite_memory_region =
ffa_memory_region_get_composite(memory_region, 0);
@@ -226,7 +216,7 @@ uint32_t ffa_memory_region_init(
count_to_copy = fragment_max_constituents;
}
- for (i = 0; i < constituent_count; ++i) {
+ for (uint32_t i = 0; i < constituent_count; ++i) {
if (i < count_to_copy) {
composite_memory_region->constituents[i] =
constituents[i];
@@ -254,43 +244,116 @@ uint32_t ffa_memory_region_init(
/**
* Initialises the given `ffa_memory_region` to be used for an
* `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
+ * Initialises the given `ffa_memory_region` and copies as many as possible of
+ * the given constituents to it.
+ *
+ * Returns the number of constituents remaining which wouldn't fit, and (via
+ * return parameters) the size in bytes of the first fragment of data copied to
+ * `memory_region` (attributes, constituents and memory region header size), and
+ * the total size of the memory sharing message including all constituents.
+ */
+uint32_t ffa_memory_region_init(
+ struct ffa_memory_region *memory_region, size_t memory_region_max_size,
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t tag,
+ ffa_memory_region_flags_t flags, enum ffa_memory_type type,
+ enum ffa_memory_cacheability cacheability,
+ enum ffa_memory_shareability shareability, uint32_t *total_length,
+ uint32_t *fragment_length)
+{
+ ffa_memory_attributes_t attributes = {
+ .type = type,
+ .cacheability = cacheability,
+ .shareability = shareability,
+ };
+
+ ffa_memory_region_init_header(memory_region, sender, attributes, flags,
+ 0, tag, receiver_count);
+
+ memcpy(memory_region->receivers, receivers,
+ receiver_count * sizeof(struct ffa_memory_access));
+
+ return ffa_memory_region_init_constituents(
+ memory_region, memory_region_max_size, constituents,
+ constituent_count, total_length, fragment_length);
+}
+
+uint32_t ffa_memory_fragment_init(
+ struct ffa_memory_region_constituent *fragment,
+ size_t fragment_max_size,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t *fragment_length)
+{
+ const uint32_t fragment_max_constituents =
+ fragment_max_size /
+ sizeof(struct ffa_memory_region_constituent);
+
+ uint32_t count_to_copy =
+ MIN(constituent_count, fragment_max_constituents);
+
+ for (uint32_t i = 0; i < count_to_copy; ++i) {
+ fragment[i] = constituents[i];
+ }
+
+ if (fragment_length != NULL) {
+ *fragment_length = count_to_copy *
+ sizeof(struct ffa_memory_region_constituent);
+ }
+
+ return constituent_count - count_to_copy;
+}
+
+/**
+ * Initialises the given `ffa_memory_region` to be used for an
+ * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
*
* Returns the size of the message written.
*/
uint32_t ffa_memory_retrieve_request_init(
struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
- ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
- ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
- enum ffa_instruction_access instruction_access,
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count, uint32_t tag, ffa_memory_region_flags_t flags,
enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
enum ffa_memory_shareability shareability)
{
- ffa_memory_access_permissions_t permissions = 0;
- ffa_memory_attributes_t attributes = 0;
+ ffa_memory_attributes_t attributes = {
+ .type = type,
+ .cacheability = cacheability,
+ .shareability = shareability,
+ };
- /* Set memory region's permissions. */
- ffa_set_data_access_attr(&permissions, data_access);
- ffa_set_instruction_access_attr(&permissions, instruction_access);
+ ffa_memory_region_init_header(memory_region, sender, attributes, flags,
+ handle, tag, receiver_count);
- /* Set memory region's page attributes. */
- ffa_set_memory_type_attr(&attributes, type);
- ffa_set_memory_cacheability_attr(&attributes, cacheability);
- ffa_set_memory_shareability_attr(&attributes, shareability);
+ memcpy(memory_region->receivers, receivers,
+ receiver_count * sizeof(struct ffa_memory_access));
- ffa_memory_region_init_header(memory_region, sender, attributes, flags,
- handle, tag, receiver, permissions);
/*
* Offset 0 in this case means that the hypervisor should allocate the
* address ranges. This is the only configuration supported by Hafnium,
* as it enforces 1:1 mappings in the stage 2 page tables.
*/
- memory_region->receivers[0].composite_memory_region_offset = 0;
- memory_region->receivers[0].reserved_0 = 0;
+ for (uint32_t i = 0; i < receiver_count; i++) {
+ memory_region->receivers[i].composite_memory_region_offset = 0;
+ memory_region->receivers[i].reserved_0 = 0;
+ }
return sizeof(struct ffa_memory_region) +
memory_region->receiver_count * sizeof(struct ffa_memory_access);
}
+/**
+ * Configure `region` for a hypervisor retrieve request - i.e. all fields except
+ * `handle` are initialized to 0.
+ */
+void ffa_hypervisor_retrieve_request_init(struct ffa_memory_region *region,
+ ffa_memory_handle_t handle)
+{
+ memset(region, 0, sizeof(struct ffa_memory_region));
+ region->handle = handle;
+}
/*
* FFA Version ABI helper.
@@ -298,98 +361,159 @@ uint32_t ffa_memory_retrieve_request_init(
* -Bits[30:16]: Major version.
* -Bits[15:0]: Minor version.
*/
-smc_ret_values ffa_version(uint32_t input_version)
+struct ffa_value ffa_version(uint32_t input_version)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_VERSION,
.arg1 = input_version
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_id_get(void)
+struct ffa_value ffa_id_get(void)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_ID_GET
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_spm_id_get(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_SPM_ID_GET
+ };
+
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_msg_wait(void)
+struct ffa_value ffa_msg_wait(void)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MSG_WAIT
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_error(int32_t error_code)
+struct ffa_value ffa_error(int32_t error_code)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_ERROR,
.arg1 = 0,
.arg2 = error_code
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Query the higher EL if the requested FF-A feature is implemented. */
-smc_ret_values ffa_features(uint32_t feature)
+struct ffa_value ffa_features(uint32_t feature)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_FEATURES,
.arg1 = feature
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
+}
+
+/* Query the higher EL if the requested FF-A feature is implemented. */
+struct ffa_value ffa_features_with_input_property(uint32_t feature, uint32_t param)
+{
+ struct ffa_value args = {
+ .fid = FFA_FEATURES,
+ .arg1 = feature,
+ .arg2 = param,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Get information about VMs or SPs based on UUID, using registers. */
+struct ffa_value ffa_partition_info_get_regs(const struct ffa_uuid uuid,
+ const uint16_t start_index,
+ const uint16_t tag)
+{
+ uint64_t arg1 = (uint64_t)uuid.uuid[1] << 32 | uuid.uuid[0];
+ uint64_t arg2 = (uint64_t)uuid.uuid[3] << 32 | uuid.uuid[2];
+ uint64_t arg3 = start_index | (uint64_t)tag << 16;
+
+ struct ffa_value args = {
+ .fid = FFA_PARTITION_INFO_GET_REGS_SMC64,
+ .arg1 = arg1,
+ .arg2 = arg2,
+ .arg3 = arg3,
+ };
+
+ return ffa_service_call(&args);
}
/* Get information about VMs or SPs based on UUID */
-smc_ret_values ffa_partition_info_get(const uint32_t uuid[4])
+struct ffa_value ffa_partition_info_get(const struct ffa_uuid uuid)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_PARTITION_INFO_GET,
- .arg1 = uuid[0],
- .arg2 = uuid[1],
- .arg3 = uuid[2],
- .arg4 = uuid[3]
+ .arg1 = uuid.uuid[0],
+ .arg2 = uuid.uuid[1],
+ .arg3 = uuid.uuid[2],
+ .arg4 = uuid.uuid[3]
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Query SPMD that the rx buffer of the partition can be released */
-smc_ret_values ffa_rx_release(void)
+struct ffa_value ffa_rx_release(void)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_RX_RELEASE
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Map the RXTX buffer */
-smc_ret_values ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages)
+struct ffa_value ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_RXTX_MAP_SMC64,
.arg1 = send,
.arg2 = recv,
- .arg3 = pages
+ .arg3 = pages,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
+}
+
+/* Unmap the RXTX buffer allocated by the given FF-A component */
+struct ffa_value ffa_rxtx_unmap(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_RXTX_UNMAP,
+ .arg1 = FFA_PARAM_MBZ,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
}
/* Donate memory to another partition */
-smc_ret_values ffa_mem_donate(uint32_t descriptor_length,
+struct ffa_value ffa_mem_donate(uint32_t descriptor_length,
uint32_t fragment_length)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_DONATE_SMC32,
.arg1 = descriptor_length,
.arg2 = fragment_length,
@@ -397,14 +521,14 @@ smc_ret_values ffa_mem_donate(uint32_t descriptor_length,
.arg4 = FFA_PARAM_MBZ
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Lend memory to another partition */
-smc_ret_values ffa_mem_lend(uint32_t descriptor_length,
- uint32_t fragment_length)
+struct ffa_value ffa_mem_lend(uint32_t descriptor_length,
+ uint32_t fragment_length)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_LEND_SMC32,
.arg1 = descriptor_length,
.arg2 = fragment_length,
@@ -412,14 +536,14 @@ smc_ret_values ffa_mem_lend(uint32_t descriptor_length,
.arg4 = FFA_PARAM_MBZ
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Share memory with another partition */
-smc_ret_values ffa_mem_share(uint32_t descriptor_length,
- uint32_t fragment_length)
+struct ffa_value ffa_mem_share(uint32_t descriptor_length,
+ uint32_t fragment_length)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_SHARE_SMC32,
.arg1 = descriptor_length,
.arg2 = fragment_length,
@@ -427,14 +551,14 @@ smc_ret_values ffa_mem_share(uint32_t descriptor_length,
.arg4 = FFA_PARAM_MBZ
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Retrieve memory shared by another partition */
-smc_ret_values ffa_mem_retrieve_req(uint32_t descriptor_length,
- uint32_t fragment_length)
+struct ffa_value ffa_mem_retrieve_req(uint32_t descriptor_length,
+ uint32_t fragment_length)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_RETRIEVE_REQ_SMC32,
.arg1 = descriptor_length,
.arg2 = fragment_length,
@@ -445,28 +569,240 @@ smc_ret_values ffa_mem_retrieve_req(uint32_t descriptor_length,
.arg7 = FFA_PARAM_MBZ
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Relinquish access to memory region */
-smc_ret_values ffa_mem_relinquish(void)
+struct ffa_value ffa_mem_relinquish(void)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_RELINQUISH,
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Reclaim exclusive access to owned memory region */
-smc_ret_values ffa_mem_reclaim(uint64_t handle, uint32_t flags)
+struct ffa_value ffa_mem_reclaim(uint64_t handle, uint32_t flags)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_RECLAIM,
.arg1 = (uint32_t) handle,
.arg2 = (uint32_t) (handle >> 32),
.arg3 = flags
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_mem_frag_rx(ffa_memory_handle_t handle,
+ uint32_t fragment_offset)
+{
+ /* Note that sender MBZ at virtual instance. */
+ struct ffa_value args = {
+ .fid = FFA_MEM_FRAG_RX,
+ .arg1 = (uint32_t)handle,
+ .arg2 = (uint32_t)(handle >> 32),
+ .arg3 = fragment_offset,
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_mem_frag_tx(ffa_memory_handle_t handle,
+ uint32_t fragment_length)
+{
+ struct ffa_value args = {
+ .fid = FFA_MEM_FRAG_TX,
+ .arg1 = (uint32_t)handle,
+ .arg2 = (uint32_t)(handle >> 32),
+ .arg3 = fragment_length,
+ };
+
+ /* Note that sender MBZ at virtual instance. */
+ return ffa_service_call(&args);
+}
+
+/** Create Notifications Bitmap for the given VM */
+struct ffa_value ffa_notification_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_BITMAP_CREATE,
+ .arg1 = vm_id,
+ .arg2 = vcpu_count,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/** Destroy Notifications Bitmap for the given VM */
+struct ffa_value ffa_notification_bitmap_destroy(ffa_id_t vm_id)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_BITMAP_DESTROY,
+ .arg1 = vm_id,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/** Bind VM to all the notifications in the bitmap */
+struct ffa_value ffa_notification_bind(ffa_id_t sender, ffa_id_t receiver,
+ uint32_t flags,
+ ffa_notification_bitmap_t bitmap)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_BIND,
+ .arg1 = (sender << 16) | (receiver),
+ .arg2 = flags,
+ .arg3 = (uint32_t)(bitmap & 0xFFFFFFFFU),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/** Unbind previously bound VM from notifications in bitmap */
+struct ffa_value ffa_notification_unbind(ffa_id_t sender,
+ ffa_id_t receiver,
+ ffa_notification_bitmap_t bitmap)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_UNBIND,
+ .arg1 = (sender << 16) | (receiver),
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = (uint32_t)(bitmap),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_notification_set(ffa_id_t sender, ffa_id_t receiver,
+ uint32_t flags,
+ ffa_notification_bitmap_t bitmap)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_SET,
+ .arg1 = (sender << 16) | (receiver),
+ .arg2 = flags,
+ .arg3 = (uint32_t)(bitmap & 0xFFFFFFFFU),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_notification_get(ffa_id_t receiver, uint32_t vcpu_id,
+ uint32_t flags)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_GET,
+ .arg1 = (vcpu_id << 16) | (receiver),
+ .arg2 = flags,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_notification_info_get(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_INFO_GET_SMC64,
+ .arg1 = FFA_PARAM_MBZ,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+static size_t char_to_arg_helper(const char *message, size_t size,
+ u_register_t *arg)
+{
+ size_t to_write = size > sizeof(uint64_t) ? sizeof(uint64_t) : size;
+
+ for (int i = 0; i < to_write; i++) {
+ ((char *)arg)[i] = message[i];
+ }
+ return to_write;
+}
+
+struct ffa_value ffa_console_log(const char *message, size_t char_count)
+{
+ struct ffa_value args = {
+ .fid = FFA_CONSOLE_LOG_SMC64,
+ .arg1 = char_count,
+ };
+ size_t written = 0;
+
+ assert(char_count <= sizeof(uint64_t) * 6);
+
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg2);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg3);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg4);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg5);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg6);
+ char_to_arg_helper(&message[written], char_count - written,
+ &args.arg7);
+
+ return ffa_service_call(&args);
+}
+
+/**
+ * Initializes receiver permissions in a memory transaction descriptor.
+ */
+struct ffa_memory_access ffa_memory_access_init(
+ ffa_id_t receiver_id, enum ffa_data_access data_access,
+ enum ffa_instruction_access instruction_access,
+ ffa_memory_receiver_flags_t flags,
+ struct ffa_memory_access_impdef *impdef)
+{
+ struct ffa_memory_access access;
+ access.reserved_0 = 0;
+ access.composite_memory_region_offset = 0;
+ access.receiver_permissions.flags = flags;
+ access.receiver_permissions.receiver = receiver_id;
+ access.receiver_permissions.permissions.data_access = data_access;
+ access.receiver_permissions.permissions.instruction_access =
+ instruction_access;
+ access.impdef = impdef != NULL ? *impdef :
+ (struct ffa_memory_access_impdef){{0, 0}};
+
+ return access;
}
diff --git a/tftf/tests/runtime_services/secure_service/spm_common.c b/tftf/tests/runtime_services/secure_service/spm_common.c
index 179ef1cb9..ee25c82f6 100644
--- a/tftf/tests/runtime_services/secure_service/spm_common.c
+++ b/tftf/tests/runtime_services/secure_service/spm_common.c
@@ -1,22 +1,24 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include "stdint.h"
+
+#include "ffa_helpers.h"
+#include <cactus_test_cmds.h>
#include <debug.h>
#include <ffa_endpoints.h>
+#include <ffa_svc.h>
+#include <lib/extensions/sve.h>
#include <spm_common.h>
#include <xlat_tables_v2.h>
-#define __STR(x) #x
-#define STR(x) __STR(x)
-#define SIMD_TWO_VECTORS_BYTES_STR (2 * SIMD_VECTOR_LEN_BYTES)
-
/**
* Helper to log errors after FF-A calls.
*/
-bool is_ffa_call_error(smc_ret_values ret)
+bool is_ffa_call_error(struct ffa_value ret)
{
if (ffa_func_id(ret) == FFA_ERROR) {
VERBOSE("FF-A call returned error (%x): %d\n",
@@ -26,12 +28,25 @@ bool is_ffa_call_error(smc_ret_values ret)
return false;
}
+bool is_expected_ffa_error(struct ffa_value ret, int32_t error_code)
+{
+ if (ffa_func_id(ret) == FFA_ERROR &&
+ ffa_error_code(ret) == error_code) {
+ return true;
+ }
+
+ ERROR("Expected FFA_ERROR(%x), code: %d, got %x %d\n",
+ FFA_ERROR, error_code, ffa_func_id(ret), ffa_error_code(ret));
+
+ return false;
+}
+
/**
* Helper to verify return of FF-A call is an FFA_MSG_SEND_DIRECT_RESP.
* Should be used after FFA_MSG_SEND_DIRECT_REQ, or after sending a test command
* to an SP.
*/
-bool is_ffa_direct_response(smc_ret_values ret)
+bool is_ffa_direct_response(struct ffa_value ret)
{
if ((ffa_func_id(ret) == FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
(ffa_func_id(ret) == FFA_MSG_SEND_DIRECT_RESP_SMC64)) {
@@ -48,7 +63,7 @@ bool is_ffa_direct_response(smc_ret_values ret)
/**
* Helper to check the return value of FF-A call is as expected.
*/
-bool is_expected_ffa_return(smc_ret_values ret, uint32_t func_id)
+bool is_expected_ffa_return(struct ffa_value ret, uint32_t func_id)
{
if (ffa_func_id(ret) == func_id) {
return true;
@@ -59,56 +74,36 @@ bool is_expected_ffa_return(smc_ret_values ret, uint32_t func_id)
return false;
}
-void fill_simd_vector_regs(const simd_vector_t v[SIMD_NUM_VECTORS])
+bool is_expected_cactus_response(struct ffa_value ret, uint32_t expected_resp,
+ uint32_t arg)
{
-#ifdef __aarch64__
- __asm__ volatile(
- "ldp q0, q1, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q2, q3, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q4, q5, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q6, q7, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q8, q9, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q10, q11, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q12, q13, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q14, q15, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q16, q17, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q18, q19, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q20, q21, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q22, q23, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q24, q25, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q26, q27, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q28, q29, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q30, q31, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "sub %0, %0, #" STR(SIMD_NUM_VECTORS * SIMD_VECTOR_LEN_BYTES) ";"
- : : "r" (v));
-#endif
+ if (!is_ffa_direct_response(ret)) {
+ return false;
+ }
+
+ if (cactus_get_response(ret) != expected_resp ||
+ (uint32_t)ret.arg4 != arg) {
+ VERBOSE("Expected response %x and %x; "
+ "Obtained %x and %x\n",
+ expected_resp, arg, cactus_get_response(ret),
+ (int32_t)ret.arg4);
+ return false;
+ }
+
+ return true;
}
-void read_simd_vector_regs(simd_vector_t v[SIMD_NUM_VECTORS])
+void dump_ffa_value(struct ffa_value ret)
{
-#ifdef __aarch64__
- memset(v, 0, sizeof(simd_vector_t) * SIMD_NUM_VECTORS);
-
- __asm__ volatile(
- "stp q0, q1, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q2, q3, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q4, q5, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q6, q7, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q8, q9, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q10, q11, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q12, q13, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q14, q15, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q16, q17, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q18, q19, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q20, q21, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q22, q23, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q24, q25, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q26, q27, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q28, q29, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q30, q31, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "sub %0, %0, #" STR(SIMD_NUM_VECTORS * SIMD_VECTOR_LEN_BYTES) ";"
- : : "r" (v));
-#endif
+ NOTICE("FF-A value: %lx, %lx, %lx, %lx, %lx, %lx, %lx, %lx\n",
+ ret.fid,
+ ret.arg1,
+ ret.arg2,
+ ret.arg3,
+ ret.arg4,
+ ret.arg5,
+ ret.arg6,
+ ret.arg7);
}
/*
@@ -125,17 +120,17 @@ void read_simd_vector_regs(simd_vector_t v[SIMD_NUM_VECTORS])
bool check_spmc_execution_level(void)
{
unsigned int is_optee_spmc_criteria = 0U;
- smc_ret_values ret_values;
+ struct ffa_value ret_values;
/*
* Send a first OP-TEE-defined protocol message through
- * FFA direct message.
+ * FFA direct message. Expect it to implement either v1.0 or v1.1.
*/
ret_values = ffa_msg_send_direct_req32(HYP_ID, SP_ID(1),
OPTEE_FFA_GET_API_VERSION, 0,
0, 0, 0);
- if ((ret_values.ret3 == FFA_VERSION_MAJOR) &&
- (ret_values.ret4 == FFA_VERSION_MINOR)) {
+ if (ret_values.arg3 == 1 &&
+ (ret_values.arg4 == 0 || ret_values.arg4 == 1)) {
is_optee_spmc_criteria++;
}
@@ -146,8 +141,8 @@ bool check_spmc_execution_level(void)
ret_values = ffa_msg_send_direct_req32(HYP_ID, SP_ID(1),
OPTEE_FFA_GET_OS_VERSION,
0, 0, 0, 0);
- if ((ret_values.ret3 == OPTEE_FFA_GET_OS_VERSION_MAJOR) &&
- (ret_values.ret4 == OPTEE_FFA_GET_OS_VERSION_MINOR)) {
+ if ((ret_values.arg3 == OPTEE_FFA_GET_OS_VERSION_MAJOR) &&
+ (ret_values.arg4 == OPTEE_FFA_GET_OS_VERSION_MINOR)) {
is_optee_spmc_criteria++;
}
@@ -155,30 +150,46 @@ bool check_spmc_execution_level(void)
}
static const struct ffa_features_test ffa_feature_test_target[] = {
- {"FFA_ERROR_32 check", FFA_ERROR, FFA_SUCCESS_SMC32},
- {"FFA_SUCCESS_32 check", FFA_SUCCESS_SMC32, FFA_SUCCESS_SMC32},
- {"FFA_INTERRUPT_32 check", FFA_INTERRUPT, FFA_SUCCESS_SMC32},
- {"FFA_VERSION_32 check", FFA_VERSION, FFA_SUCCESS_SMC32},
- {"FFA_FEATURES_32 check", FFA_FEATURES, FFA_SUCCESS_SMC32},
- {"FFA_RX_RELEASE_32 check", FFA_RX_RELEASE, FFA_SUCCESS_SMC32},
- {"FFA_RXTX_MAP_32 check", FFA_RXTX_MAP_SMC32, FFA_ERROR},
- {"FFA_RXTX_MAP_64 check", FFA_RXTX_MAP_SMC64, FFA_SUCCESS_SMC32},
- {"FFA_RXTX_UNMAP_32 check", FFA_RXTX_UNMAP, FFA_ERROR},
- {"FFA_PARTITION_INFO_GET_32 check", FFA_PARTITION_INFO_GET, FFA_SUCCESS_SMC32},
- {"FFA_ID_GET_32 check", FFA_ID_GET, FFA_SUCCESS_SMC32},
- {"FFA_MSG_POLL_32 check", FFA_MSG_POLL, FFA_SUCCESS_SMC32},
- {"FFA_MSG_WAIT_32 check", FFA_MSG_WAIT, FFA_SUCCESS_SMC32},
- {"FFA_YIELD_32 check", FFA_MSG_YIELD, FFA_SUCCESS_SMC32},
- {"FFA_RUN_32 check", FFA_MSG_RUN, FFA_SUCCESS_SMC32},
- {"FFA_MSG_SEND_32 check", FFA_MSG_SEND, FFA_SUCCESS_SMC32},
- {"FFA_MEM_DONATE_32 check", FFA_MEM_DONATE_SMC32, FFA_SUCCESS_SMC32},
- {"FFA_MEM_LEND_32 check", FFA_MEM_LEND_SMC32, FFA_SUCCESS_SMC32},
- {"FFA_MEM_SHARE_32 check", FFA_MEM_SHARE_SMC32, FFA_SUCCESS_SMC32},
- {"FFA_MEM_RETRIEVE_REQ_32 check", FFA_MEM_RETRIEVE_REQ_SMC32, FFA_SUCCESS_SMC32},
- {"FFA_MEM_RETRIEVE_RESP_32 check", FFA_MEM_RETRIEVE_RESP, FFA_SUCCESS_SMC32},
- {"FFA_MEM_RELINQUISH_32 check", FFA_MEM_RELINQUISH, FFA_SUCCESS_SMC32},
- {"FFA_MEM_RECLAIM_32 check", FFA_MEM_RECLAIM, FFA_SUCCESS_SMC32},
- {"Check non-existent command", 0xFFFF, FFA_ERROR}
+ {"FFA_ERROR_32", FFA_ERROR, FFA_SUCCESS_SMC32},
+ {"FFA_SUCCESS_32", FFA_SUCCESS_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_INTERRUPT_32", FFA_INTERRUPT, FFA_SUCCESS_SMC32},
+ {"FFA_VERSION_32", FFA_VERSION, FFA_SUCCESS_SMC32},
+ {"FFA_FEATURES_32", FFA_FEATURES, FFA_SUCCESS_SMC32},
+ {"FFA_RX_RELEASE_32", FFA_RX_RELEASE, FFA_SUCCESS_SMC32},
+ {"FFA_RXTX_MAP_32", FFA_RXTX_MAP_SMC32, FFA_ERROR},
+ {"FFA_RXTX_MAP_64", FFA_RXTX_MAP_SMC64, FFA_SUCCESS_SMC32},
+ {"FFA_RXTX_UNMAP_32", FFA_RXTX_UNMAP, FFA_SUCCESS_SMC32},
+ {"FFA_PARTITION_INFO_GET_32", FFA_PARTITION_INFO_GET, FFA_SUCCESS_SMC32},
+ {"FFA_ID_GET_32", FFA_ID_GET, FFA_SUCCESS_SMC32},
+ {"FFA_SPM_ID_GET_32", FFA_SPM_ID_GET, FFA_SUCCESS_SMC32, 0,
+ MAKE_FFA_VERSION(1, 1)},
+ {"FFA_MSG_WAIT_32", FFA_MSG_WAIT, FFA_SUCCESS_SMC32},
+ {"FFA_RUN_32", FFA_RUN, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_DONATE_32", FFA_MEM_DONATE_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_LEND_32", FFA_MEM_LEND_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_SHARE_32", FFA_MEM_SHARE_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RETRIEVE_REQ_32", FFA_MEM_RETRIEVE_REQ_SMC32,
+ FFA_SUCCESS_SMC32, FFA_FEATURES_MEM_RETRIEVE_REQ_NS_SUPPORT},
+ {"FFA_MEM_RETRIEVE_RESP_32", FFA_MEM_RETRIEVE_RESP, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RELINQUISH_32", FFA_MEM_RELINQUISH, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RECLAIM_32", FFA_MEM_RECLAIM, FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_BITMAP_CREATE_32",
+ FFA_NOTIFICATION_BITMAP_CREATE, FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_BITMAP_DESTROY_32",
+ FFA_NOTIFICATION_BITMAP_DESTROY, FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_BIND_32", FFA_NOTIFICATION_BIND,
+ FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_UNBIND_32", FFA_NOTIFICATION_UNBIND,
+ FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_SET_32", FFA_NOTIFICATION_SET,
+ FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_INFO_GET_64", FFA_NOTIFICATION_INFO_GET_SMC64,
+ FFA_SUCCESS_SMC32},
+ /* Indirect messaging is only supported in Nwd */
+ {"FFA_YIELD_32", FFA_MSG_YIELD, FFA_ERROR},
+ {"FFA_MSG_SEND_32", FFA_MSG_SEND, FFA_ERROR},
+ {"FFA_MSG_POLL_32", FFA_MSG_POLL, FFA_ERROR},
+ {"Check non-existent command", 0xFFFF, FFA_ERROR},
};
/*
@@ -199,10 +210,10 @@ unsigned int get_ffa_feature_test_target(
bool memory_retrieve(struct mailbox_buffers *mb,
struct ffa_memory_region **retrieved, uint64_t handle,
- ffa_vm_id_t sender, ffa_vm_id_t receiver,
- uint32_t mem_func)
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count, ffa_memory_region_flags_t flags)
{
- smc_ret_values ret;
+ struct ffa_value ret;
uint32_t fragment_size;
uint32_t total_size;
uint32_t descriptor_size;
@@ -212,24 +223,16 @@ bool memory_retrieve(struct mailbox_buffers *mb,
return false;
}
- /*
- * TODO: Revise shareability attribute in function call
- * below.
- * https://lists.trustedfirmware.org/pipermail/hafnium/2020-June/000023.html
- */
descriptor_size = ffa_memory_retrieve_request_init(
- mb->send, handle, sender, receiver, 0, 0,
- FFA_DATA_ACCESS_RW,
- FFA_INSTRUCTION_ACCESS_NX,
- FFA_MEMORY_NORMAL_MEM,
- FFA_MEMORY_CACHE_WRITE_BACK,
- FFA_MEMORY_OUTER_SHAREABLE);
+ mb->send, handle, sender, receivers, receiver_count, 0, flags,
+ FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+ FFA_MEMORY_INNER_SHAREABLE);
ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
if (ffa_func_id(ret) != FFA_MEM_RETRIEVE_RESP) {
- ERROR("Couldn't retrieve the memory page. Error: %x\n",
- ffa_error_code(ret));
+ ERROR("%s: couldn't retrieve the memory page. Error: %d\n",
+ __func__, ffa_error_code(ret));
return false;
}
@@ -242,8 +245,8 @@ bool memory_retrieve(struct mailbox_buffers *mb,
* successful ffa_mem_retrieve_req, total_size must be equal to
* fragment_size.
*/
- total_size = ret.ret1;
- fragment_size = ret.ret2;
+ total_size = ret.arg1;
+ fragment_size = ret.arg2;
if (total_size != fragment_size) {
ERROR("Only expect one memory segment to be sent!\n");
@@ -268,10 +271,148 @@ bool memory_retrieve(struct mailbox_buffers *mb,
return true;
}
+bool hypervisor_retrieve_request(struct mailbox_buffers *mb, uint64_t handle,
+ void *out, uint32_t out_size)
+{
+ struct ffa_value ret;
+ uint32_t total_size;
+ uint32_t fragment_size;
+ uint32_t fragment_offset;
+ struct ffa_memory_region *region_out = out;
+
+ if (out == NULL || mb == NULL) {
+ ERROR("Invalid parameters!\n");
+ return false;
+ }
+
+ ffa_hypervisor_retrieve_request_init(mb->send, handle);
+ ret = ffa_mem_retrieve_req(sizeof(struct ffa_memory_region),
+ sizeof(struct ffa_memory_region));
+
+ if (ffa_func_id(ret) != FFA_MEM_RETRIEVE_RESP) {
+ ERROR("%s: couldn't retrieve the memory page. Error: %d\n",
+ __func__, ffa_error_code(ret));
+ return false;
+ }
+
+ /*
+ * Following total_size and fragment_size are useful to keep track
+ * of the state of transaction. When the sum of all fragment_size of all
+ * fragments is equal to total_size, the memory transaction has been
+ * completed.
+ */
+ total_size = ret.arg1;
+ fragment_size = ret.arg2;
+ fragment_offset = fragment_size;
+ VERBOSE("total_size=%d, fragment_size=%d, fragment_offset=%d\n",
+ total_size, fragment_size, fragment_offset);
+
+ if (fragment_size > PAGE_SIZE) {
+ ERROR("Fragment should be smaller than RX buffer!\n");
+ return false;
+ }
+ if (total_size > out_size) {
+ ERROR("output buffer is not large enough to store all "
+ "fragments (total_size=%d, max_size=%d)\n",
+ total_size, out_size);
+ return false;
+ }
+
+ /*
+ * Copy the received message to the out buffer. This is necessary
+ * because `mb->recv` will be overwritten if sending a fragmented
+ * message.
+ */
+ memcpy(out, mb->recv, fragment_size);
+
+ if (region_out->receiver_count == 0) {
+ VERBOSE("copied region has no receivers\n");
+ return false;
+ }
+
+ if (region_out->receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
+ VERBOSE("SPMC memory sharing operations support max of %u "
+ "receivers!\n",
+ MAX_MEM_SHARE_RECIPIENTS);
+ return false;
+ }
+
+ while (fragment_offset < total_size) {
+ VERBOSE("Calling again. frag offset: %d; total: %d\n",
+ fragment_offset, total_size);
+ ret = ffa_rx_release();
+ if (ret.fid != FFA_SUCCESS_SMC32) {
+ ERROR("ffa_rx_release() failed: %d\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ ret = ffa_mem_frag_rx(handle, fragment_offset);
+ if (ret.fid != FFA_MEM_FRAG_TX) {
+ ERROR("ffa_mem_frag_rx() failed: %d\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ if (ffa_frag_handle(ret) != handle) {
+ ERROR("%s: fragment handle mismatch: expected %llu, "
+ "got "
+ "%llu\n",
+ __func__, handle, ffa_frag_handle(ret));
+ return false;
+ }
+
+ /* Sender MBZ at physical instance. */
+ if (ffa_frag_sender(ret) != 0) {
+ ERROR("%s: fragment sender mismatch: expected %d, got "
+ "%d\n",
+ __func__, 0, ffa_frag_sender(ret));
+ return false;
+ }
+
+ fragment_size = ret.arg2;
+ if (fragment_size == 0) {
+ ERROR("%s: fragment size must not be 0\n", __func__);
+ return false;
+ }
+
+ if (fragment_offset + fragment_size > out_size) {
+ ERROR("%s: fragment is too big to fit in out buffer "
+ "(%d > %d)\n",
+ __func__, fragment_offset + fragment_size,
+ out_size);
+ return false;
+ }
+
+ VERBOSE("copying fragment at offset %d with size %d\n",
+ fragment_offset, fragment_size);
+ memcpy((uint8_t *)out + fragment_offset, mb->recv,
+ fragment_size);
+
+ fragment_offset += fragment_size;
+ }
+
+ if (fragment_offset != total_size) {
+ ERROR("%s: fragment size mismatch: expected %d, got %d\n",
+ __func__, total_size, fragment_offset);
+ return false;
+ }
+
+ ret = ffa_rx_release();
+ if (ret.fid != FFA_SUCCESS_SMC32) {
+ ERROR("ffa_rx_release() failed: %d\n", ffa_error_code(ret));
+ return false;
+ }
+
+ VERBOSE("Memory Retrieved!\n");
+
+ return true;
+}
+
bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle,
- ffa_vm_id_t id)
+ ffa_id_t id)
{
- smc_ret_values ret;
+ struct ffa_value ret;
ffa_mem_relinquish_init(m, handle, 0, id);
ret = ffa_mem_relinquish();
@@ -285,48 +426,135 @@ bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle,
return true;
}
+/**
+ * Sends the remaining fragments of a fragmented FF-A memory transaction that
+ * was started with FFA_MEM_SHARE/LEND/DONATE, whose raw result is `ret`.
+ *
+ * Returns true when `total_length` bytes have been transmitted and the final
+ * handle is consistent with the expected allocator (SPMC or hypervisor,
+ * per `allocator_is_spmc`); false on any protocol error.
+ */
+bool send_fragmented_memory_region(
+	void *send_buffer,
+	const struct ffa_memory_region_constituent constituents[],
+	uint32_t constituent_count, uint32_t remaining_constituent_count,
+	uint32_t sent_length, uint32_t total_length, bool allocator_is_spmc,
+	struct ffa_value ret)
+{
+	uint64_t handle;
+	uint64_t handle_mask;
+	uint64_t expected_handle_mask =
+		allocator_is_spmc ? FFA_MEMORY_HANDLE_ALLOCATOR_SPMC
+				  : FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
+	ffa_memory_handle_t fragment_handle = FFA_MEMORY_HANDLE_INVALID;
+	uint32_t fragment_length;
+
+	/* Send the remaining fragments. */
+	while (remaining_constituent_count != 0) {
+		VERBOSE("%s: %d constituents left to send.\n", __func__,
+			remaining_constituent_count);
+		if (ret.fid != FFA_MEM_FRAG_RX) {
+			ERROR("ffa_mem_frag_tx() failed: %d\n",
+			      ffa_error_code(ret));
+			return false;
+		}
+
+		/* First FRAG_RX carries the handle; later ones must match. */
+		if (fragment_handle == FFA_MEMORY_HANDLE_INVALID) {
+			fragment_handle = ffa_frag_handle(ret);
+		} else if (ffa_frag_handle(ret) != fragment_handle) {
+			ERROR("%s: fragment handle mismatch: expected %llu, "
+			      "got %llu\n",
+			      __func__, fragment_handle, ffa_frag_handle(ret));
+			return false;
+		}
+
+		/* arg2/arg3 of FRAG_RX echo the cumulative length sent. */
+		if (ret.arg3 != sent_length) {
+			ERROR("%s: fragment length mismatch: expected %u, got "
+			      "%lu\n",
+			      __func__, sent_length, ret.arg3);
+			return false;
+		}
+
+		remaining_constituent_count = ffa_memory_fragment_init(
+			send_buffer, PAGE_SIZE,
+			constituents + constituent_count -
+				remaining_constituent_count,
+			remaining_constituent_count, &fragment_length);
+
+		ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
+		sent_length += fragment_length;
+	}
+
+	if (sent_length != total_length) {
+		ERROR("%s: fragment length mismatch: expected %u, got %u\n",
+		      __func__, total_length, sent_length);
+		return false;
+	}
+
+	if (ret.fid != FFA_SUCCESS_SMC32) {
+		ERROR("%s: ffa_mem_frag_tx() failed: %d\n", __func__,
+		      ffa_error_code(ret));
+		return false;
+	}
+
+	handle = ffa_mem_success_handle(ret);
+	handle_mask = (handle >> FFA_MEMORY_HANDLE_ALLOCATOR_SHIFT) &
+		      FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
+
+	if (handle_mask != expected_handle_mask) {
+		ERROR("%s: handle mask mismatch: expected %llu, got %llu\n",
+		      __func__, expected_handle_mask, handle_mask);
+		return false;
+	}
+
+	/* The final handle must match the one carried by the fragments. */
+	if (fragment_handle != FFA_MEMORY_HANDLE_INVALID && handle != fragment_handle) {
+		ERROR("%s: fragment handle mismatch: expected %llu, got %llu\n",
+		      __func__, fragment_handle, handle);
+		return false;
+	}
+
+	return true;
+}
+
/**
 * Helper to call memory send function whose func id is passed as a parameter.
- * Returns a valid handle in case of successful operation or
- * FFA_MEMORY_HANDLE_INVALID if something goes wrong.
- *
- * TODO: Do memory send with 'ffa_memory_region' taking multiple segments
+ * Issues the first fragment through the `mem_func` ABI and, if constituents
+ * remain, completes the transaction via send_fragmented_memory_region().
+ * Returns a valid handle in case of successful operation or
+ * FFA_MEMORY_HANDLE_INVALID if something goes wrong; the raw FF-A return
+ * value is stored in `*ret` for the caller to inspect.
 */
ffa_memory_handle_t memory_send(
-	struct ffa_memory_region *memory_region, uint32_t mem_func,
-	uint32_t fragment_length, uint32_t total_length)
+	void *send_buffer, uint32_t mem_func,
+	const struct ffa_memory_region_constituent *constituents,
+	uint32_t constituent_count, uint32_t remaining_constituent_count,
+	uint32_t fragment_length, uint32_t total_length,
+	struct ffa_value *ret)
{
-	smc_ret_values ret;
-	ffa_vm_id_t receiver =
-		memory_region->receivers[0].receiver_permissions.receiver;
-
-	if (fragment_length != total_length) {
-		ERROR("For now, fragment_length and total_length need to be"
-		      " equal");
+	/* A single-fragment transaction must carry the whole length. */
+	if (remaining_constituent_count == 0 && fragment_length != total_length) {
+		ERROR("%s: fragment_length and total_length need "
+		      "to be equal (fragment_length = %d, total_length = %d)\n",
+		      __func__, fragment_length, total_length);
		return FFA_MEMORY_HANDLE_INVALID;
	}

	switch (mem_func) {
	case FFA_MEM_SHARE_SMC32:
-		ret = ffa_mem_share(total_length, fragment_length);
+		*ret = ffa_mem_share(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_SMC32:
-		ret = ffa_mem_lend(total_length, fragment_length);
+		*ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_DONATE_SMC32:
-		ret = ffa_mem_donate(total_length, fragment_length);
+		*ret = ffa_mem_donate(total_length, fragment_length);
		break;
	default:
-		ERROR("TFTF - Invalid func id %x!\n", mem_func);
+		ERROR("%s: Invalid func id %x!\n", __func__, mem_func);
		return FFA_MEMORY_HANDLE_INVALID;
	}

-	if (is_ffa_call_error(ret)) {
-		ERROR("Failed to send message to: %x\n", receiver);
+	if (is_ffa_call_error(*ret)) {
+		VERBOSE("%s: Failed to send memory: %d\n", __func__,
+			ffa_error_code(*ret));
+		return FFA_MEMORY_HANDLE_INVALID;
+	}
+
+	/* Transmit any remaining fragments (SPMC as handle allocator). */
+	if (!send_fragmented_memory_region(
+		    send_buffer, constituents, constituent_count,
+		    remaining_constituent_count, fragment_length, total_length,
+		    true, *ret)) {
		return FFA_MEMORY_HANDLE_INVALID;
	}

-	return ffa_mem_success_handle(ret);
+	return ffa_mem_success_handle(*ret);
}
/**
@@ -335,36 +563,270 @@ ffa_memory_handle_t memory_send(
* doing it in this file for simplicity and for testing purposes.
*/
ffa_memory_handle_t memory_init_and_send(
-	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
-	ffa_vm_id_t sender, ffa_vm_id_t receiver,
+	void *send_buffer, size_t memory_region_max_size, ffa_id_t sender,
+	struct ffa_memory_access receivers[], uint32_t receiver_count,
	const struct ffa_memory_region_constituent *constituents,
-	uint32_t constituents_count, uint32_t mem_func)
+	uint32_t constituents_count, uint32_t mem_func, struct ffa_value *ret)
{
	uint32_t remaining_constituent_count;
	uint32_t total_length;
	uint32_t fragment_length;
-	enum ffa_data_access data_access = (mem_func == FFA_MEM_DONATE_SMC32) ?
-					FFA_DATA_ACCESS_NOT_SPECIFIED :
-					FFA_DATA_ACCESS_RW;
+	/*
+	 * Memory type is left unspecified for a single borrower on lend or
+	 * donate; a share (or multiple borrowers) uses normal memory.
+	 */
+	enum ffa_memory_type type =
+		(receiver_count == 1 && mem_func != FFA_MEM_SHARE_SMC32)
+			? FFA_MEMORY_NOT_SPECIFIED_MEM
+			: FFA_MEMORY_NORMAL_MEM;

	remaining_constituent_count = ffa_memory_region_init(
-		memory_region, memory_region_max_size, sender, receiver, constituents,
-		constituents_count, 0, 0, data_access,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
-		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
-		FFA_MEMORY_INNER_SHAREABLE, &total_length, &fragment_length
-	);
+		send_buffer, memory_region_max_size, sender, receivers,
+		receiver_count, constituents, constituents_count, 0, 0, type,
+		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
+		&total_length, &fragment_length);
+
+	/* memory_send() reports errors and leaves the raw value in *ret. */
+	return memory_send(send_buffer, mem_func, constituents,
+			   constituents_count, remaining_constituent_count,
+			   fragment_length, total_length, ret);
+}
+
+/* Returns true iff all four 32-bit words of the two UUIDs are equal. */
+static bool ffa_uuid_equal(const struct ffa_uuid uuid1,
+			   const struct ffa_uuid uuid2)
+{
+	for (size_t i = 0U; i < 4U; i++) {
+		if (uuid1.uuid[i] != uuid2.uuid[i]) {
+			return false;
+		}
+	}
+
+	return true;
+}
+/*
+ * Decodes entry `idx` of an FFA_PARTITION_INFO_GET_REGS result into
+ * `partition_info`. Returns false for an out-of-range index or NULL output.
+ */
+static bool ffa_partition_info_regs_get_part_info(
+	struct ffa_value *args, uint8_t idx,
+	struct ffa_partition_info *partition_info)
+{
	/*
- * For simplicity of the test, and at least for the time being,
- * the following condition needs to be true.
+	 * The list of pointers to args in return value: arg0/func encodes ff-a
+	 * function, arg1 is reserved, arg2 encodes indices. arg3 and greater
+	 * values reflect partition properties.
	 */
-	if (remaining_constituent_count != 0U) {
-		ERROR("Remaining constituent should be 0\n");
-		return FFA_MEMORY_HANDLE_INVALID;
+	uint64_t *arg_ptrs;
+	uint64_t info, uuid_lo, uuid_high;
+
+	/*
+	 * Each partition information is encoded in 3 registers, so there can
+	 * be a maximum of 5 entries. Validate before any pointer arithmetic
+	 * so an out-of-range pointer is never formed.
+	 */
+	if (idx >= 5 || !partition_info) {
+		return false;
+	}
+
+	arg_ptrs = (uint64_t *)args + ((idx * 3) + 3);
+	info = arg_ptrs[0];
+	uuid_lo = arg_ptrs[1];
+	uuid_high = arg_ptrs[2];
+
+	/*
+	 * As defined in FF-A 1.2 ALP0, 14.9 FFA_PARTITION_INFO_GET_REGS.
+	 */
+	partition_info->id = info & 0xFFFFU;
+	partition_info->exec_context = (info >> 16) & 0xFFFFU;
+	partition_info->properties = (info >> 32);
+	partition_info->uuid.uuid[0] = uuid_lo & 0xFFFFFFFFU;
+	partition_info->uuid.uuid[1] = (uuid_lo >> 32) & 0xFFFFFFFFU;
+	partition_info->uuid.uuid[2] = uuid_high & 0xFFFFFFFFU;
+	partition_info->uuid.uuid[3] = (uuid_high >> 32) & 0xFFFFFFFFU;
+
+	return true;
+}
+
+/*
+ * Compares a returned partition info descriptor against the expected one,
+ * logging every mismatching field. Returns true only if all fields match.
+ */
+static bool ffa_compare_partition_info(
+	const struct ffa_uuid uuid,
+	const struct ffa_partition_info *info,
+	const struct ffa_partition_info *expected)
+{
+	bool result = true;
+	/*
+	 * If a UUID is specified then the UUID returned in the
+	 * partition info descriptor MBZ.
+	 */
+	struct ffa_uuid expected_uuid =
+		ffa_uuid_equal(uuid, NULL_UUID) ? expected->uuid : NULL_UUID;
+
+	/* Accumulate failures so every mismatch is reported, not just the first. */
+	if (info->id != expected->id) {
+		ERROR("Wrong ID. Expected %x, got %x\n", expected->id, info->id);
+		result = false;
+	}
+
+	if (info->exec_context != expected->exec_context) {
+		ERROR("Wrong context. Expected %d, got %d\n",
+		      expected->exec_context,
+		      info->exec_context);
+		result = false;
+	}
+	if (info->properties != expected->properties) {
+		ERROR("Wrong properties. Expected %d, got %d\n",
+		      expected->properties,
+		      info->properties);
+		result = false;
+	}
+
+	if (!ffa_uuid_equal(info->uuid, expected_uuid)) {
+		ERROR("Wrong UUID. Expected %x %x %x %x, "
+		      "got %x %x %x %x\n",
+		      expected_uuid.uuid[0],
+		      expected_uuid.uuid[1],
+		      expected_uuid.uuid[2],
+		      expected_uuid.uuid[3],
+		      info->uuid.uuid[0],
+		      info->uuid.uuid[1],
+		      info->uuid.uuid[2],
+		      info->uuid.uuid[3]);
+		result = false;
+	}
+
+	return result;
+}
+
+/**
+ * Sends an FFA_PARTITION_INFO_GET_REGS request and validates the partition
+ * information returned in registers against the `expected` entries.
+ * Returns true only when the count, descriptor size and every entry match.
+ */
+bool ffa_partition_info_regs_helper(const struct ffa_uuid uuid,
+				    const struct ffa_partition_info *expected,
+				    const uint16_t expected_size)
+{
+	/*
+	 * TODO: For now, support only one invocation. Can be enhanced easily
+	 * to extend to arbitrary number of partitions.
+	 */
+	if (expected_size > 5) {
+		ERROR("%s only supports information received in"
+		      " one invocation of the ABI (5 partitions)\n",
+		      __func__);
+		return false;
+	}
+
+	struct ffa_value ret = ffa_partition_info_get_regs(uuid, 0, 0);
+
+	if (ffa_func_id(ret) != FFA_SUCCESS_SMC64) {
+		return false;
+	}
+
+	if (ffa_partition_info_regs_partition_count(ret) !=
+	    expected_size) {
+		ERROR("Unexpected number of partitions %d (expected %d)\n",
+		      ffa_partition_info_regs_partition_count(ret),
+		      expected_size);
+		return false;
+	}
+
+	if (ffa_partition_info_regs_entry_size(ret) !=
+	    sizeof(struct ffa_partition_info)) {
+		ERROR("Unexpected partition info descriptor size %d\n",
+		      ffa_partition_info_regs_entry_size(ret));
+		return false;
+	}
+
+	for (unsigned int i = 0U; i < expected_size; i++) {
+		struct ffa_partition_info info = { 0 };
+
+		/* Decoding can fail (bad index/NULL); treat it as an error. */
+		if (!ffa_partition_info_regs_get_part_info(&ret, i, &info)) {
+			ERROR("Failed to decode partition info entry %u\n", i);
+			return false;
+		}
+
+		if (!ffa_compare_partition_info(uuid, &info, &expected[i])) {
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * Sends a ffa_partition_info request and checks the response against the
+ * target. The RX buffer is released before returning.
+ */
+bool ffa_partition_info_helper(struct mailbox_buffers *mb,
+			       const struct ffa_uuid uuid,
+			       const struct ffa_partition_info *expected,
+			       const uint16_t expected_size)
+{
+	bool result = true;
+	struct ffa_value ret = ffa_partition_info_get(uuid);
+
+	/*
+	 * NOTE(review): when the call does not return FFA_SUCCESS_SMC32 the
+	 * comparisons below are skipped and this function can still return
+	 * true — confirm whether a failed FFA_PARTITION_INFO_GET should be
+	 * treated as an error here.
+	 */
+	if (ffa_func_id(ret) == FFA_SUCCESS_SMC32) {
+		if (ffa_partition_info_count(ret) != expected_size) {
+			ERROR("Unexpected number of partitions %d\n",
+			      ffa_partition_info_count(ret));
+			return false;
+		}
+		if (ffa_partition_info_desc_size(ret) !=
+		    sizeof(struct ffa_partition_info)) {
+			ERROR("Unexpected partition info descriptor size %d\n",
+			      ffa_partition_info_desc_size(ret));
+			return false;
+		}
+		/* Descriptors are delivered in the RX (recv) buffer. */
+		const struct ffa_partition_info *info =
+			(const struct ffa_partition_info *)(mb->recv);
+
+		for (unsigned int i = 0U; i < expected_size; i++) {
+			if (!ffa_compare_partition_info(uuid, &info[i], &expected[i]))
+				result = false;
+		}
+	}
+
+	/* Release the RX buffer regardless of the comparison outcome. */
+	ret = ffa_rx_release();
+	if (is_ffa_call_error(ret)) {
+		ERROR("Failed to release RX buffer\n");
+		result = false;
+	}
+	return result;
+}
+
+/*
+ * Sends a Cactus interrupt command from `source` to SP `dest` asking it to
+ * enable or disable the trusted watchdog interrupt (IRQ_TWDOG_INTID).
+ * Returns true on a CACTUS_SUCCESS direct response.
+ */
+static bool configure_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest,
+					     bool enable)
+{
+	struct ffa_value ret_values;
+
+	ret_values = cactus_interrupt_cmd(source, dest, IRQ_TWDOG_INTID,
+					  enable, INTERRUPT_TYPE_IRQ);
+
+	if (!is_ffa_direct_response(ret_values)) {
+		ERROR("Expected a direct response message while configuring"
+		      " TWDOG interrupt\n");
+		return false;
+	}
+
+	if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
+		ERROR("Failed to configure Trusted Watchdog interrupt\n");
+		return false;
	}
+	return true;
+}
+
+/* Enables the trusted watchdog interrupt in SP `dest`. */
+bool enable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest)
+{
+	return configure_trusted_wdog_interrupt(source, dest, true);
+}
+
+/* Disables the trusted watchdog interrupt in SP `dest`. */
+bool disable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest)
+{
+	return configure_trusted_wdog_interrupt(source, dest, false);
+}
+
+/**
+ * Initializes receiver permissions in a memory transaction descriptor, using
+ * `mem_func` to determine the appropriate permissions: data access is left
+ * unspecified for FFA_MEM_DONATE and set to RW otherwise; instruction access
+ * is always left unspecified.
+ */
+struct ffa_memory_access ffa_memory_access_init_permissions_from_mem_func(
+	ffa_id_t receiver_id, uint32_t mem_func)
+{
+
+	enum ffa_instruction_access instruction_access =
+		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED;
+	enum ffa_data_access data_access =
+		(mem_func == FFA_MEM_DONATE_SMC32)
+			? FFA_DATA_ACCESS_NOT_SPECIFIED
+			: FFA_DATA_ACCESS_RW;
-	return memory_send(memory_region, mem_func, fragment_length,
-			   total_length);
+	return ffa_memory_access_init(receiver_id, data_access,
+				      instruction_access, 0, NULL);
}
diff --git a/tftf/tests/runtime_services/secure_service/spm_test_helpers.c b/tftf/tests/runtime_services/secure_service/spm_test_helpers.c
new file mode 100644
index 000000000..054e774a7
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/spm_test_helpers.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <power_management.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+static struct mailbox_buffers test_mb = {.send = NULL, .recv = NULL};
+
+/*
+ * Unmaps the TFTF RX/TX buffers via FFA_RXTX_UNMAP and clears the cached
+ * mailbox pointers. Returns false if the unmap call fails.
+ */
+bool reset_tftf_mailbox(void)
+{
+	if (is_ffa_call_error(ffa_rxtx_unmap())) {
+		return false;
+	}
+
+	test_mb.send = NULL;
+	test_mb.recv = NULL;
+
+	return true;
+}
+
+/*
+ * Returns the TFTF mailbox in `*mb`, lazily mapping and caching the RX/TX
+ * buffers on first use. Returns false if the RX/TX mapping fails.
+ */
+bool get_tftf_mailbox(struct mailbox_buffers *mb)
+{
+	struct ffa_value ret;
+
+	/* Map the mailbox only once; reuse the cached buffers afterwards. */
+	if (test_mb.recv == NULL || test_mb.send == NULL) {
+		CONFIGURE_AND_MAP_MAILBOX(test_mb, PAGE_SIZE, ret);
+		if (is_ffa_call_error(ret)) {
+			return false;
+		}
+	}
+
+	*mb = test_mb;
+
+	return true;
+}
+
+/*
+ * Precondition helper for SPMC tests: checks the FF-A version, that the SPMC
+ * is not OP-TEE at S-EL1, and that every endpoint in `ffa_uuids` is deployed.
+ * Returns TEST_RESULT_SUCCESS when testing can proceed, otherwise
+ * TEST_RESULT_SKIPPED or TEST_RESULT_FAIL.
+ */
+test_result_t check_spmc_testing_set_up(
+	uint32_t ffa_version_major, uint32_t ffa_version_minor,
+	const struct ffa_uuid *ffa_uuids, size_t ffa_uuids_size)
+{
+	struct mailbox_buffers mb;
+
+	if (ffa_uuids == NULL) {
+		ERROR("Invalid parameter ffa_uuids!\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	SKIP_TEST_IF_FFA_VERSION_LESS_THAN(ffa_version_major,
+					   ffa_version_minor);
+
+	/**********************************************************************
+	 * If OP-TEE is SPMC skip the current test.
+	 **********************************************************************/
+	if (check_spmc_execution_level()) {
+		VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	GET_TFTF_MAILBOX(mb);
+
+	/* Skip the test if any expected FF-A endpoint is not deployed. */
+	for (unsigned int i = 0U; i < ffa_uuids_size; i++)
+		SKIP_TEST_IF_FFA_ENDPOINT_NOT_DEPLOYED(*mb, ffa_uuids[i]);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Powers on each secondary CPU in turn, running `cpu_on_handler` on it, and
+ * waits on the corresponding `cpu_done` event before moving to the next CPU.
+ *
+ * NOTE(review): when tftf_cpu_on() fails the error is only logged and the
+ * wait on cpu_done still happens — confirm the handler cannot be skipped,
+ * otherwise this loop would block forever.
+ */
+test_result_t spm_run_multi_core_test(uintptr_t cpu_on_handler,
+				      event_t *cpu_done)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos, cpu_node, mpidr;
+	int32_t ret;
+
+	VERBOSE("Powering on all cpus.\n");
+
+	for (unsigned int i = 0U; i < PLATFORM_CORE_COUNT; i++) {
+		tftf_init_event(&cpu_done[i]);
+	}
+
+	/* Power on each secondary CPU one after the other. */
+	for_each_cpu(cpu_node) {
+		mpidr = tftf_get_mpidr_from_node(cpu_node);
+		if (mpidr == lead_mpid) {
+			continue;
+		}
+
+		ret = tftf_cpu_on(mpidr, cpu_on_handler, 0U);
+		if (ret != 0) {
+			ERROR("tftf_cpu_on mpidr 0x%x returns %d\n",
+			      mpidr, ret);
+		}
+
+		/* Wait for the secondary CPU to be ready. */
+		core_pos = platform_get_core_pos(mpidr);
+		tftf_wait_for_event(&cpu_done[core_pos]);
+	}
+
+	VERBOSE("Done exiting.\n");
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Brings an SP's execution context on the current core to its message loop
+ * via one round of ffa_run(); SP_ID(1) is excluded from this. Returns false
+ * if the SP does not reach FFA_MSG_WAIT.
+ */
+bool spm_core_sp_init(ffa_id_t sp_id)
+{
+	/*
+	 * Secure Partitions secondary ECs need one round of ffa_run to reach
+	 * the message loop.
+	 */
+	if (sp_id != SP_ID(1)) {
+		uint32_t core_pos = get_current_core_id();
+		struct ffa_value ret = ffa_run(sp_id, core_pos);
+
+		if (ffa_func_id(ret) != FFA_MSG_WAIT) {
+			ERROR("Failed to run SP%x on core %u\n",
+			      sp_id, core_pos);
+			return false;
+		}
+	}
+
+	return true;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c b/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
index 0a722e497..1f8e81c1e 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,6 +14,7 @@
#include <lib/events.h>
#include <lib/power_management.h>
#include <platform.h>
+#include <spm_test_helpers.h>
#include <test_helpers.h>
#define ECHO_VAL1 U(0xa0a0a0a0)
@@ -24,14 +25,13 @@ static const struct ffa_uuid expected_sp_uuids[] = {
{PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
};
-
static event_t cpu_booted[PLATFORM_CORE_COUNT];
-static test_result_t send_cactus_echo_cmd(ffa_vm_id_t sender,
- ffa_vm_id_t dest,
+static test_result_t send_cactus_echo_cmd(ffa_id_t sender,
+ ffa_id_t dest,
uint64_t value)
{
- smc_ret_values ret;
+ struct ffa_value ret;
ret = cactus_echo_send_cmd(sender, dest, value);
/*
@@ -92,12 +92,12 @@ test_result_t test_ffa_direct_messaging(void)
* otherwise.
* For the CACTUS_SUCCESS response, the test returns TEST_RESULT_SUCCESS.
*/
-static test_result_t send_cactus_req_echo_cmd(ffa_vm_id_t sender,
- ffa_vm_id_t dest,
- ffa_vm_id_t echo_dest,
+static test_result_t send_cactus_req_echo_cmd(ffa_id_t sender,
+ ffa_id_t dest,
+ ffa_id_t echo_dest,
uint64_t value)
{
- smc_ret_values ret;
+ struct ffa_value ret;
ret = cactus_req_echo_send_cmd(sender, dest, echo_dest, value);
@@ -128,13 +128,13 @@ test_result_t test_ffa_sp_to_sp_direct_messaging(void)
* The following the tests are intended to test the handling of a
* direct message request with a VM's ID as a the sender.
*/
- result = send_cactus_req_echo_cmd(HYP_ID + 1, SP_ID(2), SP_ID(3),
+ result = send_cactus_req_echo_cmd(VM_ID(1), SP_ID(2), SP_ID(3),
ECHO_VAL2);
if (result != TEST_RESULT_SUCCESS) {
return result;
}
- result = send_cactus_req_echo_cmd(HYP_ID + 2, SP_ID(3), SP_ID(1),
+ result = send_cactus_req_echo_cmd(VM_ID(2), SP_ID(3), SP_ID(1),
ECHO_VAL3);
return result;
@@ -142,7 +142,7 @@ test_result_t test_ffa_sp_to_sp_direct_messaging(void)
test_result_t test_ffa_sp_to_sp_deadlock(void)
{
- smc_ret_values ret;
+ struct ffa_value ret;
/**********************************************************************
* Check SPMC has ffa_version and expected FFA endpoints are deployed.
@@ -165,14 +165,12 @@ test_result_t test_ffa_sp_to_sp_deadlock(void)
/**
* Handler that is passed during tftf_cpu_on to individual CPU cores.
* Runs a specific core and send a direct message request.
- * Expects core_pos | SP_ID as a response.
*/
static test_result_t cpu_on_handler(void)
{
- unsigned int mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int core_pos = platform_get_core_pos(mpid);
+ unsigned int core_pos = get_current_core_id();
test_result_t ret = TEST_RESULT_SUCCESS;
- smc_ret_values ffa_ret;
+ struct ffa_value ffa_ret;
/*
* Send a direct message request to SP1 (MP SP) from current physical
@@ -255,48 +253,9 @@ out:
*/
test_result_t test_ffa_secondary_core_direct_msg(void)
{
- unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int core_pos, cpu_node, mpidr;
- int32_t ret;
-
/**********************************************************************
* Check SPMC has ffa_version and expected FFA endpoints are deployed.
**********************************************************************/
CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
-
- for (unsigned int i = 0U; i < PLATFORM_CORE_COUNT; i++) {
- tftf_init_event(&cpu_booted[i]);
- }
-
- for_each_cpu(cpu_node) {
- mpidr = tftf_get_mpidr_from_node(cpu_node);
- if (mpidr == lead_mpid) {
- continue;
- }
-
- ret = tftf_cpu_on(mpidr, (uintptr_t)cpu_on_handler, 0U);
- if (ret != 0) {
- ERROR("tftf_cpu_on mpidr 0x%x returns %d\n", mpidr, ret);
- }
- }
-
- VERBOSE("Waiting secondary CPUs to turn off ...\n");
-
- for_each_cpu(cpu_node) {
- mpidr = tftf_get_mpidr_from_node(cpu_node);
- if (mpidr == lead_mpid) {
- continue;
- }
-
- core_pos = platform_get_core_pos(mpidr);
- tftf_wait_for_event(&cpu_booted[core_pos]);
- }
-
- VERBOSE("Done exiting.\n");
-
- /**********************************************************************
- * All tests passed.
- **********************************************************************/
-
- return TEST_RESULT_SUCCESS;
+ return spm_run_multi_core_test((uintptr_t)cpu_on_handler, cpu_booted);
}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c b/tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c
new file mode 100644
index 000000000..0a345d4b2
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <cactus_test_cmds.h>
+#include "ffa_helpers.h"
+#include <debug.h>
+#include <ffa_endpoints.h>
+#include <ffa_svc.h>
+#include <host_realm_helper.h>
+#include <irq.h>
+#include <platform.h>
+#include <smccc.h>
+#include <spm_common.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+
+static __aligned(PAGE_SIZE) uint64_t share_page[PAGE_SIZE / sizeof(uint64_t)];
+
+static const struct ffa_uuid expected_sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
+};
+
+/**
+ * @Test_Aim@ Check a realm region cannot be accessed from a secure partition.
+ *
+ * This test shares a TFTF allocated buffer with a secure partition through
+ * FF-A memory sharing operation. The buffer is initially marked NS in the GPT
+ * and transitioned to realm after sharing. Then, the SP is invoked to retrieve
+ * the region (map it to its S2 translation regime), and maps it to its secure
+ * S1 translation regime. It then attempts a read access which results in the
+ * PE triggering a GPF caught by a custom synchronous abort handler.
+ *
+ */
+test_result_t rl_memory_cannot_be_accessed_in_s(void)
+{
+	struct ffa_memory_region_constituent constituents[] = {
+		{
+			(void *)share_page, 1, 0
+		}
+	};
+	const uint32_t constituents_count = sizeof(constituents) /
+		sizeof(struct ffa_memory_region_constituent);
+	ffa_memory_handle_t handle;
+	struct mailbox_buffers mb;
+	struct ffa_value ret;
+	u_register_t retmm;
+
+	struct ffa_memory_access receiver =
+		ffa_memory_access_init_permissions_from_mem_func(
+			RECEIVER, FFA_MEM_SHARE_SMC32);
+
+	/* Test requires FEAT_RME; skip otherwise. */
+	if (get_armv9_2_feat_rme_support() == 0U) {
+		return TEST_RESULT_SKIPPED;
+	}
+
+	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+	GET_TFTF_MAILBOX(mb);
+
+	/* Share the page with the SP while it is still NS in the GPT. */
+	handle = memory_init_and_send(mb.send, PAGE_SIZE, SENDER, &receiver, 1,
+				      constituents, constituents_count,
+				      FFA_MEM_SHARE_SMC32, &ret);
+
+	if (handle == FFA_MEMORY_HANDLE_INVALID) {
+		return TEST_RESULT_FAIL;
+	}
+
+	VERBOSE("TFTF - Handle: %llx Address: %p\n",
+		handle, constituents[0].address);
+
+	host_rmi_init_cmp_result();
+
+	/* Delegate the shared page to Realm. */
+	retmm = host_rmi_granule_delegate((u_register_t)&share_page);
+	if (retmm != 0UL) {
+		ERROR("Granule delegate failed, ret=0x%lx\n", retmm);
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Retrieve the shared page and attempt accessing it.
+	 * Tell SP to expect an exception.
+	 */
+	ret = cactus_mem_send_cmd(SENDER, RECEIVER, FFA_MEM_SHARE_SMC32,
+				  handle, 0, 1, true);
+
+	/* Undelegate the shared page. */
+	retmm = host_rmi_granule_undelegate((u_register_t)&share_page);
+	if (retmm != 0UL) {
+		ERROR("Granule undelegate failed, ret=0x%lx\n", retmm);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Reclaim the region so the page returns to TFTF ownership. */
+	if (is_ffa_call_error(ffa_mem_reclaim(handle, 0))) {
+		ERROR("Memory reclaim failed!\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Expect success response with value 1 hinting an exception
+	 * triggered while the SP accessed the region.
+	 */
+	if (!(cactus_get_response(ret) == CACTUS_SUCCESS &&
+	      cactus_error_code(ret) == 1)) {
+		ERROR("Exceptions test failed!\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	return host_cmp_result();
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_features.c b/tftf/tests/runtime_services/secure_service/test_ffa_features.c
deleted file mode 100644
index e4cd845fd..000000000
--- a/tftf/tests/runtime_services/secure_service/test_ffa_features.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <spm_common.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-
-test_result_t test_ffa_features(void)
-{
- SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
-
- /* Check if SPMC is OP-TEE at S-EL1 */
- if (check_spmc_execution_level()) {
- /* FFA_FEATURES is not yet supported in OP-TEE */
- return TEST_RESULT_SUCCESS;
- }
-
- smc_ret_values ffa_ret;
- const struct ffa_features_test *ffa_feature_test_target;
- unsigned int i, test_target_size =
- get_ffa_feature_test_target(&ffa_feature_test_target);
-
- for (i = 0U; i < test_target_size; i++) {
- ffa_ret = ffa_features(ffa_feature_test_target[i].feature);
- if (ffa_func_id(ffa_ret) != ffa_feature_test_target[i].expected_ret) {
- tftf_testcase_printf("%s returned %x, expected %x\n",
- ffa_feature_test_target[i].test_name,
- ffa_func_id(ffa_ret),
- ffa_feature_test_target[i].expected_ret);
- return TEST_RESULT_FAIL;
- }
- if ((ffa_feature_test_target[i].expected_ret == FFA_ERROR) &&
- (ffa_error_code(ffa_ret) != FFA_ERROR_NOT_SUPPORTED)) {
- tftf_testcase_printf("%s failed for the wrong reason: "
- "returned %x, expected %x\n",
- ffa_feature_test_target[i].test_name,
- ffa_error_code(ffa_ret),
- FFA_ERROR_NOT_SUPPORTED);
- return TEST_RESULT_FAIL;
- }
- }
-
- return TEST_RESULT_SUCCESS;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_group0_interrupts.c b/tftf/tests/runtime_services/secure_service/test_ffa_group0_interrupts.c
new file mode 100644
index 000000000..c6c719428
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_group0_interrupts.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+#include <platform.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+
+#define SP_SLEEP_TIME 200U
+#define NS_TIME_SLEEP 200U
+
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+
+static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
+
+/*
+ * Asks SP1 to sleep while the SBSA secure watchdog (100ms period) raises a
+ * Group0 secure interrupt; checks the direct response arrives and that the
+ * SP slept for at least the requested time.
+ */
+test_result_t test_ffa_group0_interrupt_sp_running(void)
+{
+	struct ffa_value ret_values;
+
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+	/* Send request to first Cactus SP to sleep for 200ms.*/
+	ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SP_SLEEP_TIME);
+
+	/*
+	 * SBSA secure watchdog timer fires every 100ms. Hence a Group0 secure
+	 * interrupt should trigger during this time.
+	 */
+	if (!is_ffa_direct_response(ret_values)) {
+		ERROR("Expected a direct response for sleep command\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Make sure elapsed time not less than sleep time. */
+	if (cactus_get_response(ret_values) < SP_SLEEP_TIME) {
+		ERROR("Lapsed time less than requested sleep time\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Sleeps in the normal world for NS_TIME_SLEEP ms so the SBSA secure wdog
+ * timer fires while NWd is running; verifies (via the system counter) that
+ * at least the requested time elapsed.
+ */
+test_result_t test_ffa_group0_interrupt_in_nwd(void)
+{
+	uint64_t time1;
+	volatile uint64_t time2, time_lapsed;
+	uint64_t timer_freq = read_cntfrq_el0();
+
+	time1 = syscounter_read();
+
+	/*
+	 * Sleep for NS_TIME_SLEEP ms. This ensures SBSA secure wdog timer
+	 * triggers during this time.
+	 */
+	waitms(NS_TIME_SLEEP);
+	time2 = syscounter_read();
+
+	/* Lapsed time should be at least equal to sleep time. */
+	time_lapsed = ((time2 - time1) * 1000) / timer_freq;
+
+	if (time_lapsed < NS_TIME_SLEEP) {
+		ERROR("Time elapsed less than expected value: %llu vs %u\n",
+		      time_lapsed, NS_TIME_SLEEP);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_interrupts.c b/tftf/tests/runtime_services/secure_service/test_ffa_interrupts.c
index 7c70de2c3..454ea0570 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_interrupts.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_interrupts.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,6 +7,7 @@
#include <cactus_test_cmds.h>
#include <ffa_endpoints.h>
#include <ffa_helpers.h>
+#include <spm_test_helpers.h>
#include <test_helpers.h>
#include <timer.h>
@@ -14,10 +15,14 @@ static volatile int timer_irq_received;
#define SENDER HYP_ID
#define RECEIVER SP_ID(1)
-#define SLEEP_TIME 200U
+#define RECEIVER_2 SP_ID(2)
+#define RECEIVER_3 SP_ID(3)
+#define TIMER_DURATION 50U
+#define SLEEP_TIME 100U
+#define SLEEP_TIME_FWD 200U
static const struct ffa_uuid expected_sp_uuids[] = {
- {PRIMARY_UUID}
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
};
/*
@@ -31,69 +36,392 @@ static int timer_handler(void *data)
return 0;
}
+/*
+ * Registers the NS timer handler and arms the TFTF timer to fire in
+ * `milli_secs` ms. Returns the tftf_program_timer() result.
+ */
+static int program_timer(unsigned long milli_secs)
+{
+	/* Program timer. */
+	timer_irq_received = 0;
+	tftf_timer_register_handler(timer_handler);
+
+	return tftf_program_timer(milli_secs);
+}
+
+/*
+ * Cancels the timer and unregisters its handler; returns non-zero if the
+ * timer interrupt was handled in NWd (TFTF).
+ */
+static int check_timer_interrupt(void)
+{
+	/* Check that the timer interrupt has been handled in NWd(TFTF). */
+	tftf_cancel_timer();
+	tftf_timer_unregister_handler();
+
+	return timer_irq_received;
+}
+
/*
- * @Test_Aim@ Test non-secure interrupts while executing Secure Partition.
+ * @Test_Aim@ Test non-secure interrupts while a Secure Partition capable
+ * of managed exit is executing.
*
- * 1. Enable managed exit interrupt by sending interrupt_enable command to
- * Cactus.
- *
- * 2. Register a handler for the non-secure timer interrupt. Program it to fire
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
* in a certain time.
*
- * 3. Send a blocking request to Cactus to execute in busy loop.
+ * 2. Send a direct request to Cactus SP to execute in busy loop.
*
- * 4. While executing in busy loop, the non-secure timer should
+ * 3. While executing in busy loop, the non-secure timer should
* fire and trap into SPM running at S-EL2 as FIQ.
*
- * 5. SPM injects a managed exit virtual FIQ into Cactus (as configured in the
+ * 4. SPM injects a managed exit virtual FIQ into Cactus (as configured in the
* interrupt enable call), causing it to run its interrupt handler.
*
- * 6. Cactus's managed exit handler acknowledges interrupt arrival by
+ * 5. Cactus's managed exit handler acknowledges interrupt arrival by
* requesting the interrupt id to the SPMC, and check if it is the
* MANAGED_EXIT_INTERRUPT_ID.
*
- * 7. Check whether the pending non-secure timer interrupt successfully got
+ * 6. Check whether the pending non-secure timer interrupt successfully got
* handled in TFTF.
*
- * 8. Send a direct message request command to resume Cactus's execution.
- * It resumes in the sleep loop and completes it. It then returns with
- * a direct message response. Check if time lapsed is greater than
- * sleeping time.
+ * 7. Send a new sleep command to Cactus SP. An error response must be sent
+ * back by the Cactus SP with CACTUS_ERROR_TEST as the error code.
+ *
+ * 8. Send a command asking the SP to resume after managed exit. SP resumes in
+ * the suspended sleep loop and completes it. It then returns with a direct
+ * message response. Check if time lapsed is greater than sleeping time.
*
*/
-test_result_t test_ffa_ns_interrupt(void)
+test_result_t test_ffa_ns_interrupt_managed_exit(void)
{
int ret;
- smc_ret_values ret_values;
+ struct ffa_value ret_values;
- CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ ret = program_timer(TIMER_DURATION);
+ if (ret < 0) {
+ ERROR("Failed to program timer (%d)\n", ret);
+ return TEST_RESULT_FAIL;
+ }
- /* Enable managed exit interrupt as FIQ in the secure side. */
- ret_values = cactus_interrupt_cmd(SENDER, RECEIVER, MANAGED_EXIT_INTERRUPT_ID,
- true, INTERRUPT_TYPE_FIQ);
+ /* Send request to primary Cactus to sleep for 100ms. */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SLEEP_TIME);
if (!is_ffa_direct_response(ret_values)) {
return TEST_RESULT_FAIL;
}
- if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
- ERROR("Failed to enable Managed exit interrupt\n");
+ /*
+ * Managed exit interrupt occurs during this time, Cactus
+ * will respond with interrupt ID.
+ */
+ if (cactus_get_response(ret_values) != MANAGED_EXIT_INTERRUPT_ID) {
+ ERROR("Managed exit interrupt did not occur!\n");
return TEST_RESULT_FAIL;
}
- /* Program timer */
- timer_irq_received = 0;
- tftf_timer_register_handler(timer_handler);
+ if (check_timer_interrupt() == 0) {
+ ERROR("Timer interrupt hasn't actually been handled.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send a command asking the SP to resume after managed exit. This
+ * effectively resumes the Cactus in the sleep routine. Note that
+ * Cactus should return error if the current endpoint sent a new
+ * command.
+ */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SLEEP_TIME);
+
+ if (cactus_get_response(ret_values) != CACTUS_ERROR &&
+ cactus_error_code(ret_values) != CACTUS_ERROR_TEST) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_resume_after_managed_exit(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure elapsed time not less than sleep time. */
+ if (cactus_get_response(ret_values) < SLEEP_TIME) {
+ ERROR("Lapsed time less than requested sleep time\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test the scenario where a non-secure interrupt triggers while a
+ * Secure Partition, that specified the action for NS interrupts as SIGNALABLE, is
+ * executing.
+ *
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
+ * in a certain time.
+ *
+ * 2. Send a direct request to Cactus SP to execute in busy loop.
+ *
+ * 3. While executing in busy loop, the non-secure timer should fire. Cactus SP
+ * should be preempted by non-secure interrupt.
+ *
+ * 4. Execution traps to SPMC running at S-EL2 as FIQ. SPMC returns control to
+ * the normal world through FFA_INTERRUPT ABI for it to handle the non-secure
+ * interrupt.
+ *
+ * 5. Check whether the pending non-secure timer interrupt successfully got
+ * handled in the normal world by TFTF.
+ *
+ * 6. Resume the Cactus SP using FFA_RUN ABI for it to complete the sleep
+ * routine.
+ *
+ * 7. Ensure the Cactus SP sends the DIRECT RESPONSE message.
+ *
+ * 8. Check if time lapsed is greater than sleep time.
+ *
+ */
+test_result_t test_ffa_ns_interrupt_signaled(void)
+{
+ int ret;
+ struct ffa_value ret_values;
+ unsigned int core_pos = get_current_core_id();
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ ret = program_timer(TIMER_DURATION);
+ if (ret < 0) {
+ ERROR("Failed to program timer (%d)\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Send request to secondary Cactus to sleep for 100ms. */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER_2, SLEEP_TIME);
+
+ if (check_timer_interrupt() == 0) {
+ ERROR("Timer interrupt hasn't actually been handled.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Cactus SP should be preempted by non-secure interrupt. SPMC
+ * returns control to the normal world through FFA_INTERRUPT ABI
+ * for it to handle the non-secure interrupt.
+ */
+ if (ffa_func_id(ret_values) != FFA_INTERRUPT) {
+ ERROR("Expected FFA_INTERRUPT as return status!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Ensure SPMC returns FFA_ERROR with BUSY error code when a direct
+ * request message is sent to the preempted SP.
+ */
+ ret_values = cactus_echo_send_cmd(SENDER, RECEIVER_2, ECHO_VAL1);
+
+ if ((ffa_func_id(ret_values) != FFA_ERROR) ||
+ (ffa_error_code(ret_values) != FFA_ERROR_BUSY)) {
+ ERROR("Expected FFA_ERROR(BUSY)! Got %x(%x)\n",
+ ffa_func_id(ret_values), ffa_error_code(ret_values));
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Resume the Cactus SP using FFA_RUN ABI for it to complete the
+ * sleep routine and send the direct response message.
+ */
+ VERBOSE("Resuming %x\n", RECEIVER_2);
+ ret_values = ffa_run(RECEIVER_2, core_pos);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure elapsed time not less than sleep time. */
+ if (cactus_get_response(ret_values) < SLEEP_TIME) {
+ ERROR("Lapsed time less than requested sleep time\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ This test exercises the following scenario: Managed exit is
+ * supported by both SPs in a call chain. A non-secure interrupt triggers
+ * while the second SP is processing a direct request message sent by the first
+ * SP. We choose SP(1) as the first SP and SP(3) as the second SP.
+ *
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
+ * in a certain time.
+ *
+ * 2. Send a direct request to the first SP(i.e., SP(1)) to forward sleep command to
+ * the second SP(i.e., SP(3)).
+ *
+ * 3. While the second SP is running the busy loop, non-secure interrupt would
+ * trigger during this time.
+ *
+ * 4. The interrupt will be trapped to SPMC as FIQ. SPMC will inject the managed
+ * exit signal to the second SP through vIRQ conduit and perform eret to
+ * resume execution in the second SP.
+ *
+ * 5. The second SP sends the managed exit direct response to the first SP
+ * through its interrupt handler for managed exit.
+ *
+ * 6. SPMC proactively injects managed exit signal to the first SP through vFIQ
+ * conduit and resumes it using eret.
+ *
+ * 7. The first Cactus SP sends the managed exit direct response to TFTF through
+ * its interrupt handler for managed exit.
+ *
+ * 8. TFTF checks the return value in the direct message response from the first SP
+ * and ensures it is the managed exit interrupt ID.
+ *
+ * 9. Check whether the pending non-secure timer interrupt successfully got
+ * handled in the normal world by TFTF.
+ *
+ * 10. Send a dummy direct message request command to resume the first SP's execution.
+ *
+ * 11. The first SP direct message request returns with managed exit response. It
+ * then sends a dummy direct message request command to resume the second SP's
+ * execution.
+ *
+ * 12. The second SP resumes in the sleep routine and sends a direct message
+ * response to the first SP.
+ *
+ * 13. The first SP checks that the time lapsed is not less than the sleep
+ * time and, if successful, sends a direct message response to the TFTF.
+ *
+ * 14. TFTF ensures the direct message response did not return with an error.
+ *
+ */
+test_result_t test_ffa_ns_interrupt_managed_exit_chained(void)
+{
+ int ret;
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
- ret = tftf_program_timer(100);
+ ret = program_timer(TIMER_DURATION);
if (ret < 0) {
ERROR("Failed to program timer (%d)\n", ret);
return TEST_RESULT_FAIL;
}
- /* Send request to primary Cactus to sleep for 200ms */
+ /*
+ * Send request to first Cactus SP to send request to another Cactus
+ * SP to sleep.
+ */
+ ret_values = cactus_fwd_sleep_cmd(SENDER, RECEIVER, RECEIVER_3,
+ SLEEP_TIME_FWD, true);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Managed exit interrupt occurs during this time, Cactus
+ * will respond with interrupt ID.
+ */
+ if (cactus_get_response(ret_values) != MANAGED_EXIT_INTERRUPT_ID) {
+ ERROR("Managed exit interrupt did not occur!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (check_timer_interrupt() == 0) {
+ ERROR("Timer interrupt hasn't actually been handled.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send a command asking the SP to resume after managed exit. This
+ * effectively resumes the Cactus in the sleep routine. Note that
+ * Cactus should return error if the current endpoint sent a new
+ * command.
+ */
ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SLEEP_TIME);
+ if (cactus_get_response(ret_values) != CACTUS_ERROR &&
+ cactus_error_code(ret_values) != CACTUS_ERROR_TEST) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_resume_after_managed_exit(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ This test exercises the following scenario: Managed exit is
+ * supported by the first SP but not by the second SP in a call chain. A
+ * non-secure interrupt triggers while the second SP is processing a direct request
+ * message sent by the first SP. We choose SP(1) as the first SP and SP(2) as
+ * the second SP.
+ *
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
+ * in a certain time.
+ *
+ * 2. Send a direct request to the first SP(i.e., SP(1)) to forward sleep command to
+ * the second SP(i.e., SP(2)).
+ *
+ * 3. While the second SP is running the busy loop, non-secure interrupt would
+ * trigger during this time.
+ *
+ * 4. The interrupt will be trapped to SPMC as FIQ. SPMC finds the source of
+ * the interrupted direct message request and prepares the return status
+ * as FFA_INTERRUPT.
+ *
+ * 5. SPMC injects managed exit signal to the first SP through vFIQ
+ * conduit and resumes it using eret.
+ *
+ * 6. The first Cactus SP sends the managed exit direct response to TFTF through
+ * its interrupt handler for managed exit.
+ *
+ * 7. TFTF checks the return value in the direct message response from the first SP
+ * and ensures it is the managed exit interrupt ID.
+ *
+ * 8. Check whether the pending non-secure timer interrupt successfully got
+ * handled in the normal world by TFTF.
+ *
+ * 9. Send a dummy direct message request command to resume the first SP's execution.
+ *
+ * 10. The first SP direct message request returns with FFA_INTERRUPT status. It
+ * then resumes the second SP's execution using FFA_RUN ABI.
+ *
+ * 11. The second SP resumes in the sleep routine and sends a direct message
+ * response to the first SP.
+ *
+ * 12. The first SP checks that the time lapsed is not less than the sleep
+ * time and, if successful, sends a direct message response to the TFTF.
+ *
+ * 13. TFTF ensures the direct message response did not return with an error.
+ *
+ */
+test_result_t test_ffa_SPx_ME_SPy_signaled(void)
+{
+ int ret;
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ ret = program_timer(TIMER_DURATION);
+ if (ret < 0) {
+ ERROR("Failed to program timer (%d)\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send request to first Cactus SP to send request to another Cactus
+ * SP to sleep.
+ */
+ ret_values = cactus_fwd_sleep_cmd(SENDER, RECEIVER, RECEIVER_2,
+ SLEEP_TIME_FWD, true);
+
if (!is_ffa_direct_response(ret_values)) {
return TEST_RESULT_FAIL;
}
@@ -107,42 +435,207 @@ test_result_t test_ffa_ns_interrupt(void)
return TEST_RESULT_FAIL;
}
- /* Check that the timer interrupt has been handled in NS-world (TFTF) */
- tftf_cancel_timer();
- tftf_timer_unregister_handler();
+ if (check_timer_interrupt() == 0) {
+ ERROR("Timer interrupt hasn't actually been handled.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send a command asking the SP to resume after managed exit. This
+ * effectively resumes the Cactus in the sleep routine. Note that
+ * Cactus should return error if the current endpoint sent a new
+ * command.
+ */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SLEEP_TIME);
+
+ if (cactus_get_response(ret_values) != CACTUS_ERROR &&
+ cactus_error_code(ret_values) != CACTUS_ERROR_TEST) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_resume_after_managed_exit(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
- if (timer_irq_received == 0) {
+/*
+ * @Test_Aim@ This test exercises the following scenario: Managed exit is
+ * supported by the second SP but not by the first SP in a call chain. A non-secure
+ * interrupt triggers while the second SP is processing a direct request message
+ * sent by the first SP. We choose SP(2) as the first SP and SP(1) as the second SP.
+ *
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
+ * in a certain time.
+ *
+ * 2. Send a direct request to the first SP(i.e., SP(2)) to forward sleep command to
+ * the second SP(i.e., SP(1)).
+ *
+ * 3. While the second SP is running the busy loop, non-secure interrupt would
+ * trigger during this time.
+ *
+ * 4. The interrupt will be trapped to SPMC as FIQ. SPMC will inject the managed
+ * exit signal to the second SP through vFIQ conduit and perform eret to
+ * resume execution in the second SP.
+ *
+ * 5. The second SP sends the managed exit direct response to the first SP
+ * through its interrupt handler for managed exit. Note that SPMC does not
+ * change the state of the non-secure interrupt at the GIC interface. SPMC
+ * resumes the first SP but execution immediately traps to fiq handler of
+ * SPMC.
+ *
+ * 6. SPMC returns control to the normal world with the help of SPMD through
+ * FFA_INTERRUPT ABI for TFTF to handle the non-secure interrupt.
+ *
+ * 7. TFTF checks the direct message request to the first SP returned with a
+ * FFA_INTERRUPT status.
+ *
+ * 8. Check whether the pending non-secure timer interrupt successfully got
+ * handled in the normal world by TFTF.
+ *
+ * 9. Resume the first Cactus SP using FFA_RUN ABI.
+ *
+ * 10. The first SP direct message request returns with managed exit response. It
+ * then sends a dummy direct message request command to resume the second SP's
+ * execution.
+ *
+ * 11. The second SP resumes in the sleep routine and sends a direct message
+ * response to the first SP.
+ *
+ * 12. The first SP checks that the time lapsed is not less than the sleep
+ * time and, if successful, sends a direct message response to the TFTF.
+ *
+ * 13. TFTF ensures the direct message response did not return with an error.
+ *
+ */
+test_result_t test_ffa_SPx_signaled_SPy_ME(void)
+{
+ int ret;
+ struct ffa_value ret_values;
+ unsigned int core_pos = get_current_core_id();
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ ret = program_timer(TIMER_DURATION);
+ if (ret < 0) {
+ ERROR("Failed to program timer (%d)\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send a request to the first Cactus SP to send request to another Cactus
+ * SP to sleep.
+ */
+ VERBOSE("Forward sleep command\n");
+ ret_values = cactus_fwd_sleep_cmd(SENDER, RECEIVER_2, RECEIVER,
+ SLEEP_TIME_FWD, true);
+
+ if (check_timer_interrupt() == 0) {
ERROR("Timer interrupt hasn't actually been handled.\n");
return TEST_RESULT_FAIL;
}
/*
- * Send a dummy direct message request to relinquish CPU cycles.
- * This resumes Cactus in the sleep routine.
+ * Cactus SP should be preempted by non-secure interrupt. SPMC
+ * returns control to the normal world through FFA_INTERRUPT ABI
+ * for it to handle the non-secure interrupt.
*/
- ret_values = ffa_msg_send_direct_req64(SENDER, RECEIVER,
- 0, 0, 0, 0, 0);
+ if (ffa_func_id(ret_values) != FFA_INTERRUPT) {
+ ERROR("Expected FFA_INTERRUPT as return status!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Ensure SPMC returns FFA_ERROR with BUSY error code when a direct
+ * request message is sent to the preempted SP.
+ */
+ ret_values = cactus_echo_send_cmd(SENDER, RECEIVER_2, ECHO_VAL1);
+
+ if ((ffa_func_id(ret_values) != FFA_ERROR) ||
+ (ffa_error_code(ret_values) != FFA_ERROR_BUSY)) {
+ ERROR("Expected FFA_ERROR(BUSY)! Got %x(%x)\n",
+ ffa_func_id(ret_values), ffa_error_code(ret_values));
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Resume the Cactus SP using FFA_RUN ABI for it to complete the
+ * sleep routine and send the direct response message.
+ */
+ ret_values = ffa_run(RECEIVER_2, core_pos);
if (!is_ffa_direct_response(ret_values)) {
return TEST_RESULT_FAIL;
}
- /* Make sure elapsed time not less than sleep time */
- if (cactus_get_response(ret_values) < SLEEP_TIME) {
- ERROR("Lapsed time less than requested sleep time\n");
+ if (cactus_get_response(ret_values) == CACTUS_ERROR) {
return TEST_RESULT_FAIL;
}
- /* Disable Managed exit interrupt */
- ret_values = cactus_interrupt_cmd(SENDER, RECEIVER, MANAGED_EXIT_INTERRUPT_ID,
- false, 0);
+ return TEST_RESULT_SUCCESS;
+}
+/*
+ * @Test_Aim@ Test the scenario where a non-secure interrupt triggers while a
+ * Secure Partition, that specified the action for NS interrupts as QUEUED, is
+ * executing.
+ *
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
+ * in a certain time.
+ *
+ * 2. Send a direct request to Cactus SP to execute in busy loop.
+ *
+ * 3. While executing in busy loop, the non-secure timer should fire. Cactus SP
+ * should NOT be preempted by the non-secure interrupt.
+ *
+ * 4. Cactus SP should complete the sleep routine and return with a direct
+ * response message.
+ *
+ * 5. Ensure that elapsed time in the sleep routine is not less than sleep time
+ * requested through direct message request.
+ *
+ */
+test_result_t test_ffa_ns_interrupt_queued(void)
+{
+ int ret;
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ ret = program_timer(TIMER_DURATION);
+ if (ret < 0) {
+ ERROR("Failed to program timer (%d)\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Send request to a Cactus SP to sleep for 100ms. */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER_3, SLEEP_TIME);
+
+ if (check_timer_interrupt() == 0) {
+ ERROR("Timer interrupt hasn't actually been handled.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Cactus SP should not be preempted by non-secure interrupt. It
+ * should complete the sleep routine and return with a direct response
+ * message.
+ */
if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected direct message response\n");
return TEST_RESULT_FAIL;
}
- if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
- ERROR("Failed to disable Managed exit interrupt\n");
+ /* Make sure elapsed time not less than sleep time. */
+ if (cactus_get_response(ret_values) < SLEEP_TIME) {
+ ERROR("Lapsed time less than requested sleep time\n");
return TEST_RESULT_FAIL;
}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c b/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
index f126c57d6..af5a077f0 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
@@ -1,16 +1,26 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include "arch_features.h"
+#include "arch_helpers.h"
+#include "ffa_helpers.h"
+#include "ffa_svc.h"
+#include "stdint.h"
+#include "utils_def.h"
#include <debug.h>
+#include "ffa_helpers.h"
+#include <sync.h>
#include <cactus_test_cmds.h>
#include <ffa_endpoints.h>
+#include <host_realm_rmi.h>
+#include <spm_common.h>
+#include <spm_test_helpers.h>
#include <test_helpers.h>
#include <tftf_lib.h>
-#include <spm_common.h>
#include <xlat_tables_defs.h>
#define MAILBOX_SIZE PAGE_SIZE
@@ -18,12 +28,134 @@
#define SENDER HYP_ID
#define RECEIVER SP_ID(1)
+/*
+ * A number of pages that is large enough that it must take two fragments to
+ * share.
+ */
+#define FRAGMENTED_SHARE_PAGE_COUNT \
+ (sizeof(struct ffa_memory_region) / \
+ sizeof(struct ffa_memory_region_constituent))
+
static const struct ffa_uuid expected_sp_uuids[] = {
{PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
};
/* Memory section to be used for memory share operations */
-static __aligned(PAGE_SIZE) uint8_t share_page[PAGE_SIZE];
+static __aligned(PAGE_SIZE) uint8_t
+ share_page[PAGE_SIZE * FRAGMENTED_SHARE_PAGE_COUNT];
+static __aligned(PAGE_SIZE) uint8_t donate_page[PAGE_SIZE];
+static __aligned(PAGE_SIZE) uint8_t consecutive_donate_page[PAGE_SIZE];
+static __aligned(PAGE_SIZE) uint8_t four_share_pages[PAGE_SIZE * 4];
+
+static bool gpc_abort_triggered;
+
+static bool check_written_words(uint32_t *ptr, uint32_t word, uint32_t wcount)
+{
+ VERBOSE("TFTF - Memory contents after SP use:\n");
+ for (unsigned int i = 0U; i < wcount; i++) {
+ VERBOSE(" %u: %x\n", i, ptr[i]);
+
+ /* Verify content of memory is as expected. */
+ if (ptr[i] != word) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool test_memory_send_expect_denied(uint32_t mem_func,
+ void *mem_ptr,
+ ffa_id_t borrower)
+{
+ struct ffa_value ret;
+ struct mailbox_buffers mb;
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)mem_ptr, 1, 0}
+ };
+ ffa_memory_handle_t handle;
+
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(borrower,
+ mem_func);
+
+ GET_TFTF_MAILBOX(mb);
+
+ handle = memory_init_and_send((struct ffa_memory_region *)mb.send,
+ MAILBOX_SIZE, SENDER, &receiver, 1,
+ constituents, constituents_count,
+ mem_func, &ret);
+
+ if (handle != FFA_MEMORY_HANDLE_INVALID) {
+ ERROR("Received a valid FF-A memory handle, and that isn't "
+ "expected.\n");
+ return false;
+ }
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_DENIED)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool data_abort_handler(void)
+{
+ uint64_t esr_elx = IS_IN_EL2() ? read_esr_el2() : read_esr_el1();
+
+ VERBOSE("%s esr_elx %llx\n", __func__, esr_elx);
+
+ if (EC_BITS(esr_elx) == EC_DABORT_CUR_EL) {
+ /* Synchronous data abort triggered by Granule protection */
+ if ((ISS_BITS(esr_elx) & ISS_DFSC_MASK) == DFSC_GPF_DABORT) {
+ VERBOSE("%s GPF Data Abort caught to address: %llx\n",
+ __func__, (uint64_t)read_far_el2());
+ gpc_abort_triggered = true;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool get_gpc_abort_triggered(void)
+{
+ bool ret = gpc_abort_triggered;
+
+ gpc_abort_triggered = false;
+
+ return ret;
+}
+
+/**
+ * Test invocation to FF-A memory sharing interfaces that should return in an
+ * error.
+ */
+test_result_t test_share_forbidden_ranges(void)
+{
+ const uintptr_t forbidden_address[] = {
+ /* Cactus SP memory. */
+ (uintptr_t)0x7200000,
+ /* SPMC Memory. */
+ (uintptr_t)0x6000000,
+ /* NS memory defined in cactus tertiary. */
+ (uintptr_t)0x0000880080001000,
+ };
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ for (unsigned i = 0; i < 3; i++) {
+ if (!test_memory_send_expect_denied(
+ FFA_MEM_SHARE_SMC32, (void *)forbidden_address[i],
+ RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
/**
* Tests that it is possible to share memory with SWd from NWd.
@@ -43,68 +175,111 @@ static __aligned(PAGE_SIZE) uint8_t share_page[PAGE_SIZE];
* Hypervisor (sitting in EL2) would relinquish access from EL1/EL0
* FF-A endpoint at relevant moment.
*/
-static test_result_t test_memory_send_sp(uint32_t mem_func)
+static test_result_t test_memory_send_sp(uint32_t mem_func, ffa_id_t borrower,
+ struct ffa_memory_region_constituent *constituents,
+ size_t constituents_count)
{
- smc_ret_values ret;
+ struct ffa_value ret;
ffa_memory_handle_t handle;
uint32_t *ptr;
struct mailbox_buffers mb;
+ unsigned int rme_supported = get_armv9_2_feat_rme_support();
+ const bool check_gpc_fault =
+ mem_func != FFA_MEM_SHARE_SMC32 &&
+ rme_supported != 0U;
+
+ /* Arbitrarily write 5 words after using memory. */
+ const uint32_t nr_words_to_write = 5;
+
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(borrower,
+ mem_func);
/***********************************************************************
* Check if SPMC has ffa_version and expected FFA endpoints are deployed.
**********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
GET_TFTF_MAILBOX(mb);
- struct ffa_memory_region_constituent constituents[] = {
- {(void *)share_page, 1, 0}
- };
+ /*
+ * If the RME is enabled for the platform under test, check that the
+ * GPCs are working as expected, as such setup the exception handler.
+ */
+ if (check_gpc_fault) {
+ register_custom_sync_exception_handler(data_abort_handler);
+ }
- const uint32_t constituents_count = sizeof(constituents) /
- sizeof(struct ffa_memory_region_constituent);
+ for (size_t i = 0; i < constituents_count; i++) {
+ VERBOSE("Sharing Address: %p\n", constituents[i].address);
+ ptr = (uint32_t *)constituents[i].address;
+ for (size_t j = 0; j < nr_words_to_write; j++) {
+ ptr[j] = mem_func + 0xFFA;
+ }
+ }
handle = memory_init_and_send((struct ffa_memory_region *)mb.send,
- MAILBOX_SIZE, SENDER, RECEIVER,
+ MAILBOX_SIZE, SENDER, &receiver, 1,
constituents, constituents_count,
- mem_func);
+ mem_func, &ret);
if (handle == FFA_MEMORY_HANDLE_INVALID) {
return TEST_RESULT_FAIL;
}
- VERBOSE("TFTF - Handle: %llx\nTFTF - Address: %p\n",
- handle, constituents[0].address);
+ VERBOSE("TFTF - Handle: %llx\n", handle);
ptr = (uint32_t *)constituents[0].address;
- ret = cactus_mem_send_cmd(SENDER, RECEIVER, mem_func, handle);
-
- if (!is_ffa_direct_response(ret)) {
- return TEST_RESULT_FAIL;
- }
+ ret = cactus_mem_send_cmd(SENDER, borrower, mem_func, handle, 0,
+ nr_words_to_write, false);
- if (cactus_get_response(ret) != CACTUS_SUCCESS) {
+ if (!is_ffa_direct_response(ret) ||
+ cactus_get_response(ret) != CACTUS_SUCCESS) {
+ ffa_mem_reclaim(handle, 0);
ERROR("Failed memory send operation!\n");
return TEST_RESULT_FAIL;
}
/*
- * Print 5 words from the memory region to validate SP wrote to the
- * memory region.
+ * If there is RME support, expect an exception to trigger as soon as the
+ * security state is updated, due to a GPC fault.
*/
- VERBOSE("TFTF - Memory contents after SP use:\n");
- for (unsigned int i = 0U; i < 5U; i++)
- VERBOSE(" %u: %x\n", i, ptr[i]);
+ if (check_gpc_fault) {
+ *ptr = 0xBEEF;
+ }
- /* To make the compiler happy in case it is not a verbose build */
- if (LOG_LEVEL < LOG_LEVEL_VERBOSE)
- (void)ptr;
+ if (mem_func != FFA_MEM_DONATE_SMC32) {
- if (mem_func != FFA_MEM_DONATE_SMC32 &&
- is_ffa_call_error(ffa_mem_reclaim(handle, 0))) {
+ /* Reclaim memory entirely before checking its state. */
+ if (is_ffa_call_error(ffa_mem_reclaim(handle, 0))) {
tftf_testcase_printf("Couldn't reclaim memory\n");
return TEST_RESULT_FAIL;
+ }
+
+ for (uint32_t i = 0; i < constituents_count; i++) {
+ ptr = constituents[i].address;
+
+ /*
+ * Check that borrower used the memory as expected
+ * for FFA_MEM_SHARE test.
+ */
+ if (mem_func == FFA_MEM_SHARE_SMC32 &&
+ !check_written_words(ptr,
+ mem_func + 0xFFAU,
+ nr_words_to_write)) {
+ ERROR("Fail because of state of memory.\n");
+ return TEST_RESULT_FAIL;
+ }
+ }
+ }
+
+ if (check_gpc_fault) {
+ unregister_custom_sync_exception_handler();
+ if (!get_gpc_abort_triggered()) {
+ ERROR("No exception due to GPC for lend/donate with RME.\n");
+ return TEST_RESULT_FAIL;
+ }
}
return TEST_RESULT_SUCCESS;
@@ -112,17 +287,78 @@ static test_result_t test_memory_send_sp(uint32_t mem_func)
test_result_t test_mem_share_sp(void)
{
- return test_memory_send_sp(FFA_MEM_SHARE_SMC32);
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)share_page, 1, 0}
+ };
+
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ return test_memory_send_sp(FFA_MEM_SHARE_SMC32, RECEIVER, constituents,
+ constituents_count);
}
test_result_t test_mem_lend_sp(void)
{
- return test_memory_send_sp(FFA_MEM_LEND_SMC32);
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)four_share_pages, 4, 0},
+ {(void *)share_page, 1, 0}
+ };
+
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ return test_memory_send_sp(FFA_MEM_LEND_SMC32, RECEIVER, constituents,
+ constituents_count);
}
test_result_t test_mem_donate_sp(void)
{
- return test_memory_send_sp(FFA_MEM_DONATE_SMC32);
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)donate_page, 1, 0}
+ };
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+ return test_memory_send_sp(FFA_MEM_DONATE_SMC32, RECEIVER, constituents,
+ constituents_count);
+}
+
+test_result_t test_consecutive_donate(void)
+{
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)consecutive_donate_page, 1, 0}
+ };
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ test_result_t ret = test_memory_send_sp(FFA_MEM_DONATE_SMC32, SP_ID(1),
+ constituents,
+ constituents_count);
+
+ if (ret != TEST_RESULT_SUCCESS) {
+ ERROR("Failed at first attempting of sharing.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!test_memory_send_expect_denied(FFA_MEM_DONATE_SMC32,
+ consecutive_donate_page,
+ SP_ID(1))) {
+ ERROR("Memory was successfully donated again from the NWd, to "
+ "the same borrower.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!test_memory_send_expect_denied(FFA_MEM_DONATE_SMC32,
+ consecutive_donate_page,
+ SP_ID(2))) {
+ ERROR("Memory was successfully donated again from the NWd, to "
+ "another borrower.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
}
/*
@@ -130,44 +366,798 @@ test_result_t test_mem_donate_sp(void)
* Cactus SP should reply to TFTF on whether the test succeeded or not.
*/
static test_result_t test_req_mem_send_sp_to_sp(uint32_t mem_func,
- ffa_vm_id_t sender_sp,
- ffa_vm_id_t receiver_sp)
+ ffa_id_t sender_sp,
+ ffa_id_t receiver_sp,
+ bool non_secure)
{
- smc_ret_values ret;
+ struct ffa_value ret;
/***********************************************************************
* Check if SPMC's ffa_version and presence of expected FF-A endpoints.
**********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
ret = cactus_req_mem_send_send_cmd(HYP_ID, sender_sp, mem_func,
- receiver_sp);
+ receiver_sp, non_secure);
if (!is_ffa_direct_response(ret)) {
return TEST_RESULT_FAIL;
}
if (cactus_get_response(ret) == CACTUS_ERROR) {
+ ERROR("Failed sharing memory between SPs. Error code: %d\n",
+ cactus_error_code(ret));
return TEST_RESULT_FAIL;
}
return TEST_RESULT_SUCCESS;
}
+/*
+ * Test requests a memory send operation from SP to VM.
+ * The test expects cactus to reply CACTUS_ERROR, providing the FF-A error code
+ * the last memory send FF-A call that cactus performed.
+ */
+static test_result_t test_req_mem_send_sp_to_vm(uint32_t mem_func,
+ ffa_id_t sender_sp,
+ ffa_id_t receiver_vm)
+{
+ struct ffa_value ret;
+
+ /**********************************************************************
+ * Check if SPMC's ffa_version and presence of expected FF-A endpoints.
+ *********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ ret = cactus_req_mem_send_send_cmd(HYP_ID, sender_sp, mem_func,
+ receiver_vm, false);
+
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) == CACTUS_ERROR &&
+ cactus_error_code(ret) == FFA_ERROR_DENIED) {
+ return TEST_RESULT_SUCCESS;
+ }
+
+ tftf_testcase_printf("Did not get the expected error, "
+ "mem send returned with %d\n",
+ cactus_get_response(ret));
+ return TEST_RESULT_FAIL;
+}
+
test_result_t test_req_mem_share_sp_to_sp(void)
{
return test_req_mem_send_sp_to_sp(FFA_MEM_SHARE_SMC32, SP_ID(3),
- SP_ID(2));
+ SP_ID(2), false);
+}
+
+test_result_t test_req_ns_mem_share_sp_to_sp(void)
+{
+ /*
+ * Skip the test when RME is enabled (for test setup reasons).
+ * For RME tests, the model specifies 48b physical address size
+ * at the PE, but misses allocating RAM and increasing the PA at
+ * the interconnect level.
+ */
+ if (get_armv9_2_feat_rme_support() != 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* This test requires 48b physical address size capability. */
+ SKIP_TEST_IF_PA_SIZE_LESS_THAN(48);
+
+ return test_req_mem_send_sp_to_sp(FFA_MEM_SHARE_SMC32, SP_ID(3),
+ SP_ID(2), true);
}
test_result_t test_req_mem_lend_sp_to_sp(void)
{
return test_req_mem_send_sp_to_sp(FFA_MEM_LEND_SMC32, SP_ID(3),
- SP_ID(2));
+ SP_ID(2), false);
}
test_result_t test_req_mem_donate_sp_to_sp(void)
{
return test_req_mem_send_sp_to_sp(FFA_MEM_DONATE_SMC32, SP_ID(1),
- SP_ID(3));
+ SP_ID(3), false);
+}
+
+test_result_t test_req_mem_share_sp_to_vm(void)
+{
+ return test_req_mem_send_sp_to_vm(FFA_MEM_SHARE_SMC32, SP_ID(1),
+ HYP_ID);
+}
+
+test_result_t test_req_mem_lend_sp_to_vm(void)
+{
+ return test_req_mem_send_sp_to_vm(FFA_MEM_LEND_SMC32, SP_ID(2),
+ HYP_ID);
+}
+
+test_result_t test_mem_share_to_sp_clear_memory(void)
+{
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)four_share_pages, 4, 0},
+ {(void *)share_page, 1, 0}
+ };
+
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+ struct mailbox_buffers mb;
+ uint32_t remaining_constituent_count;
+ uint32_t total_length;
+ uint32_t fragment_length;
+ ffa_memory_handle_t handle;
+ struct ffa_value ret;
+ /* Arbitrarily write 10 words after using shared memory. */
+ const uint32_t nr_words_to_write = 10U;
+
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(
+ RECEIVER, FFA_MEM_LEND_SMC32);
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ remaining_constituent_count = ffa_memory_region_init(
+ (struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
+ &receiver, 1, constituents, constituents_count, 0,
+ FFA_MEMORY_REGION_FLAG_CLEAR,
+ FFA_MEMORY_NOT_SPECIFIED_MEM, 0, 0,
+ &total_length, &fragment_length);
+
+ if (remaining_constituent_count != 0) {
+ ERROR("Transaction descriptor initialization failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ handle = memory_send(mb.send, FFA_MEM_LEND_SMC32, constituents,
+ constituents_count, remaining_constituent_count,
+ fragment_length, total_length, &ret);
+
+ if (handle == FFA_MEMORY_HANDLE_INVALID) {
+ ERROR("Memory Share failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ VERBOSE("Memory has been shared!\n");
+
+ ret = cactus_mem_send_cmd(SENDER, RECEIVER, FFA_MEM_LEND_SMC32, handle,
+ FFA_MEMORY_REGION_FLAG_CLEAR,
+ nr_words_to_write, false);
+
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) != CACTUS_SUCCESS) {
+ ERROR("Failed memory send operation!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = ffa_mem_reclaim(handle, 0);
+
+ if (is_ffa_call_error(ret)) {
+ ERROR("Memory reclaim failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Print `region` if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ */
+static void print_memory_region(struct ffa_memory_region *region)
+{
+ VERBOSE("region.sender = %d\n", region->sender);
+ VERBOSE("region.attributes.shareability = %d\n",
+ region->attributes.shareability);
+ VERBOSE("region.attributes.cacheability = %d\n",
+ region->attributes.cacheability);
+ VERBOSE("region.attributes.type = %d\n", region->attributes.type);
+ VERBOSE("region.attributes.security = %d\n",
+ region->attributes.security);
+ VERBOSE("region.flags = %d\n", region->flags);
+ VERBOSE("region.handle = %lld\n", region->handle);
+ VERBOSE("region.tag = %lld\n", region->tag);
+ VERBOSE("region.memory_access_desc_size = %d\n",
+ region->memory_access_desc_size);
+ VERBOSE("region.receiver_count = %d\n", region->receiver_count);
+ VERBOSE("region.receivers_offset = %d\n", region->receivers_offset);
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ */
+static bool verify_retrieve_response(const struct ffa_memory_region *region1,
+ const struct ffa_memory_region *region2)
+{
+ if (region1->sender != region2->sender) {
+ ERROR("region1.sender=%d, expected %d\n", region1->sender,
+ region2->sender);
+ return false;
+ }
+ if (region1->attributes.shareability != region2->attributes.shareability) {
+ ERROR("region1.attributes.shareability=%d, expected %d\n",
+ region1->attributes.shareability,
+ region2->attributes.shareability);
+ return false;
+ }
+ if (region1->attributes.cacheability != region2->attributes.cacheability) {
+ ERROR("region1.attributes.cacheability=%d, expected %d\n",
+ region1->attributes.cacheability,
+ region2->attributes.cacheability);
+ return false;
+ }
+ if (region1->attributes.type != region2->attributes.type) {
+ ERROR("region1.attributes.type=%d, expected %d\n",
+ region1->attributes.type, region2->attributes.type);
+ return false;
+ }
+ if (region1->attributes.security != region2->attributes.security) {
+ ERROR("region1.attributes.security=%d, expected %d\n",
+ region1->attributes.security, region2->attributes.security);
+ return false;
+ }
+ if (region1->flags != region2->flags) {
+ ERROR("region1->flags=%d, expected %d\n", region1->flags,
+ region2->flags);
+ return false;
+ }
+ if (region1->handle != region2->handle) {
+ ERROR("region1.handle=%lld, expected %lld\n", region1->handle,
+ region2->handle);
+ return false;
+ }
+ if (region1->tag != region2->tag) {
+ ERROR("region1.tag=%lld, expected %lld\n", region1->tag, region2->tag);
+ return false;
+ }
+ if (region1->memory_access_desc_size != region2->memory_access_desc_size) {
+ ERROR("region1.memory_access_desc_size=%d, expected %d\n",
+ region1->memory_access_desc_size,
+ region2->memory_access_desc_size);
+ return false;
+ }
+ if (region1->receiver_count != region2->receiver_count) {
+ ERROR("region1.receiver_count=%d, expected %d\n",
+ region1->receiver_count, region2->receiver_count);
+ return false;
+ }
+ if (region1->receivers_offset != region2->receivers_offset) {
+ ERROR("region1.receivers_offset=%d, expected %d\n",
+ region1->receivers_offset, region2->receivers_offset);
+ return false;
+ }
+ for (uint32_t i = 0; i < 3; i++) {
+ if (region1->reserved[i] != 0) {
+ ERROR("region.reserved[%d]=%d, expected 0\n", i,
+ region1->reserved[i]);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ */
+static bool
+verify_constituent(struct ffa_memory_region_constituent *constituent,
+ void *address, uint32_t page_count)
+{
+ if (constituent->address != address) {
+ ERROR("constituent.address=%p, expected %p\n",
+ constituent->address, address);
+ return false;
+ }
+ if (constituent->page_count != page_count) {
+ ERROR("constituent.page_count=%d, expected %d\n",
+ constituent->page_count, page_count);
+ return false;
+ }
+ if (constituent->reserved != 0) {
+ ERROR("constituent.reserved=%d, expected 0\n",
+ constituent->reserved);
+ return false;
+ }
+ return true;
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ */
+static bool verify_composite(struct ffa_composite_memory_region *composite,
+ struct ffa_memory_region_constituent *constituent,
+ uint32_t page_count, uint32_t constituent_count)
+{
+ if (composite->page_count != page_count) {
+ ERROR("composite.page_count=%d, expected %d\n",
+ composite->page_count, page_count);
+ return false;
+ }
+ if (composite->constituent_count != constituent_count) {
+ ERROR("composite.constituent_count=%d, expected %d\n",
+ composite->constituent_count, constituent_count);
+ return false;
+ }
+ if (composite->reserved_0 != 0) {
+ ERROR("composite.reserved_0=%llu, expected 0\n",
+ composite->reserved_0);
+ return false;
+ }
+ for (uint32_t j = 0; j < composite->constituent_count; j++) {
+ if (!verify_constituent(constituent, share_page, 1)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool verify_receivers_impdef(struct ffa_memory_access_impdef impdef1,
+ struct ffa_memory_access_impdef impdef2)
+{
+ if (impdef1.val[0] != impdef2.val[0] ||
+ impdef1.val[1] != impdef2.val[1]) {
+ ERROR("ipmdef1.val[0]=%llu expected=%llu"
+ " ipmdef1.val[1]=%llu expected=%llu\n",
+ impdef1.val[0], impdef2.val[0],
+ impdef1.val[1], impdef2.val[1]);
+ return false;
+ }
+
+ return true;
+}
+
+static bool verify_permissions(
+ ffa_memory_access_permissions_t permissions1,
+ ffa_memory_access_permissions_t permissions2)
+{
+ uint8_t access1;
+ uint8_t access2;
+
+ access1 = permissions1.data_access;
+ access2 = permissions2.data_access;
+
+ if (access1 != access2) {
+ ERROR("permissions1.data_access=%u expected=%u\n",
+ access1, access2);
+ return false;
+ }
+
+ access1 = permissions1.instruction_access;
+ access2 = permissions2.instruction_access;
+
+ if (access1 != access2) {
+ ERROR("permissions1.instruction_access=%u expected=%u\n",
+ access1, access2);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ */
+static bool verify_receivers(struct ffa_memory_access *receivers1,
+ struct ffa_memory_access *receivers2,
+ uint32_t receivers_count)
+{
+ for (uint32_t i = 0; i < receivers_count; i++) {
+ if (receivers1[i].receiver_permissions.receiver !=
+ receivers2[i].receiver_permissions.receiver) {
+ ERROR("receivers1[%u].receiver_permissions.receiver=%x"
+ " expected=%x\n", i,
+ receivers1[i].receiver_permissions.receiver,
+ receivers2[i].receiver_permissions.receiver);
+ return false;
+ }
+
+ if (receivers1[i].receiver_permissions.flags !=
+ receivers2[i].receiver_permissions.flags) {
+ ERROR("receivers1[%u].receiver_permissions.flags=%u"
+ " expected=%u\n", i,
+ receivers1[i].receiver_permissions.flags,
+ receivers2[i].receiver_permissions.flags);
+ return false;
+ }
+
+ if (!verify_permissions(
+ receivers1[i].receiver_permissions.permissions,
+ receivers2[i].receiver_permissions.permissions)) {
+ return false;
+ }
+
+ if (receivers1[i].composite_memory_region_offset !=
+ receivers2[i].composite_memory_region_offset) {
+ ERROR("receivers1[%u].composite_memory_region_offset=%u"
+ " expected %u\n",
+ i, receivers1[i].composite_memory_region_offset,
+ receivers2[i].composite_memory_region_offset);
+ return false;
+ }
+
+ if (!verify_receivers_impdef(receivers1[i].impdef,
+ receivers1[i].impdef)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * Helper for performing a hypervisor retrieve request test.
+ */
+static test_result_t hypervisor_retrieve_request_test_helper(
+ uint32_t mem_func, bool multiple_receivers, bool fragmented)
+{
+ static struct ffa_memory_region_constituent
+ sent_constituents[FRAGMENTED_SHARE_PAGE_COUNT];
+ __aligned(PAGE_SIZE) static uint8_t page[PAGE_SIZE * 2] = {0};
+ struct ffa_memory_region *hypervisor_retrieve_response =
+ (struct ffa_memory_region *)page;
+ struct ffa_memory_region expected_response;
+ struct mailbox_buffers mb;
+ ffa_memory_handle_t handle;
+ struct ffa_value ret;
+ struct ffa_composite_memory_region *composite;
+ struct ffa_memory_access *retrvd_receivers;
+ uint32_t expected_flags = 0;
+
+ ffa_memory_attributes_t expected_attrs = {
+ .cacheability = FFA_MEMORY_CACHE_WRITE_BACK,
+ .shareability = FFA_MEMORY_INNER_SHAREABLE,
+ .security = FFA_MEMORY_SECURITY_NON_SECURE,
+ .type = (!multiple_receivers && mem_func != FFA_MEM_SHARE_SMC32)
+ ? FFA_MEMORY_NOT_SPECIFIED_MEM
+ : FFA_MEMORY_NORMAL_MEM,
+ };
+
+ struct ffa_memory_access receivers[2] = {
+ ffa_memory_access_init_permissions_from_mem_func(SP_ID(1),
+ mem_func),
+ ffa_memory_access_init_permissions_from_mem_func(SP_ID(2),
+ mem_func),
+ };
+
+ /*
+ * Only pass 1 receiver to `memory_init_and_send` if we are not testing
+ * the multiple-receivers functionality of the hypervisor retrieve
+ * request.
+ */
+ uint32_t receiver_count =
+ multiple_receivers ? ARRAY_SIZE(receivers) : 1;
+
+ uint32_t sent_constituents_count =
+ fragmented ? ARRAY_SIZE(sent_constituents) : 1;
+
+ /* Prepare the composite offset for the comparison. */
+ for (uint32_t i = 0; i < receiver_count; i++) {
+ receivers[i].composite_memory_region_offset =
+ sizeof(struct ffa_memory_region) +
+ receiver_count *
+ sizeof(struct ffa_memory_access);
+ }
+
+ /* Add a page per constituent, so that we exhaust the size of a single
+ * fragment (for testing). In a real world scenario, the whole region
+ * could be described in a single constituent.
+ */
+ for (uint32_t i = 0; i < sent_constituents_count; i++) {
+ sent_constituents[i].address = share_page + i * PAGE_SIZE;
+ sent_constituents[i].page_count = 1;
+ sent_constituents[i].reserved = 0;
+ }
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+ GET_TFTF_MAILBOX(mb);
+
+ switch (mem_func) {
+ case FFA_MEM_SHARE_SMC32:
+ expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
+ break;
+ case FFA_MEM_LEND_SMC32:
+ expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
+ break;
+ case FFA_MEM_DONATE_SMC32:
+ expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
+ break;
+ default:
+ ERROR("Invalid mem_func: %d\n", mem_func);
+ panic();
+ }
+
+ handle = memory_init_and_send(mb.send, MAILBOX_SIZE, SENDER, receivers,
+ receiver_count, sent_constituents,
+ sent_constituents_count, mem_func, &ret);
+ if (handle == FFA_MEMORY_HANDLE_INVALID) {
+ ERROR("Memory share failed: %d\n", ffa_error_code(ret));
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send Hypervisor Retrieve request according to section 17.4.3 of FFA
+ * v1.2-REL0 specification.
+ */
+ if (!hypervisor_retrieve_request(&mb, handle, page, sizeof(page))) {
+ return TEST_RESULT_FAIL;
+ }
+
+ print_memory_region(hypervisor_retrieve_response);
+
+ /*
+ * Verify the received `FFA_MEM_RETRIEVE_RESP` aligns with
+ * transaction description sent above.
+ */
+ expected_response = (struct ffa_memory_region) {
+ .sender = SENDER,
+ .attributes = expected_attrs,
+ .flags = expected_flags,
+ .handle = handle,
+ .tag = 0,
+ .memory_access_desc_size = sizeof(struct ffa_memory_access),
+ .receiver_count = receiver_count,
+ .receivers_offset =
+ offsetof(struct ffa_memory_region, receivers),
+ };
+
+ if (!verify_retrieve_response(hypervisor_retrieve_response,
+ &expected_response)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ retrvd_receivers =
+ ffa_memory_region_get_receiver(hypervisor_retrieve_response, 0);
+
+ if (!verify_receivers(retrvd_receivers,
+ receivers, receiver_count)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ composite = ffa_memory_region_get_composite(
+ hypervisor_retrieve_response, 0);
+
+ if (!verify_composite(composite, composite->constituents,
+ sent_constituents_count, sent_constituents_count)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Reclaim for the SPMC to deallocate any data related to the handle.
+ */
+ ret = ffa_mem_reclaim(handle, 0);
+ if (is_ffa_call_error(ret)) {
+ ERROR("Memory reclaim failed: %d\n", ffa_error_code(ret));
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t test_hypervisor_share_retrieve(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC32, false, false);
+}
+
+test_result_t test_hypervisor_lend_retrieve(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC32, false, false);
+}
+
+test_result_t test_hypervisor_donate_retrieve(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_DONATE_SMC32, false, false);
+}
+
+test_result_t test_hypervisor_share_retrieve_multiple_receivers(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC32, true, false);
+}
+
+test_result_t test_hypervisor_lend_retrieve_multiple_receivers(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC32, true, false);
+}
+
+test_result_t test_hypervisor_share_retrieve_fragmented(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC32, false, true);
+}
+
+test_result_t test_hypervisor_lend_retrieve_fragmented(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC32, false, true);
+}
+
+/**
+ * Test helper that performs a memory sharing operation, and alters the PAS
+ * of the memory, to validate that the SPM intercepts the operation in case the
+ * PAS is not coherent with its use. Relevant for the functioning of
+ * FFA_MEM_LEND and FFA_MEM_DONATE from NWd to an SP.
+ *
+ * In case the memory is not in the NS state, the SPMC should intercept the
+ * memory management call with an appropriate FFA_ERROR.
+ */
+static test_result_t test_ffa_mem_send_realm_expect_fail(
+ uint32_t mem_func, ffa_id_t borrower,
+ struct ffa_memory_region_constituent *constituents,
+ size_t constituents_count, uint64_t delegate_addr)
+{
+ struct ffa_value ret;
+ uint32_t remaining_constituent_count;
+ uint32_t total_length;
+ uint32_t fragment_length;
+ struct mailbox_buffers mb;
+ u_register_t ret_rmm;
+ test_result_t result = TEST_RESULT_FAIL;
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(borrower,
+ mem_func);
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /***********************************************************************
+ * Check if SPMC has ffa_version and expected FFA endpoints are deployed.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ register_custom_sync_exception_handler(data_abort_handler);
+
+ /*
+ * Delegate page to a realm. This should make memory sharing operation
+ * fail.
+ */
+ ret_rmm = host_rmi_granule_delegate((u_register_t)delegate_addr);
+
+ if (ret_rmm != 0UL) {
+ INFO("Delegate operation returns 0x%lx for address %llx\n",
+ ret_rmm, delegate_addr);
+ return TEST_RESULT_FAIL;
+ }
+
+ remaining_constituent_count = ffa_memory_region_init(
+ (struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
+ &receiver, 1, constituents, constituents_count, 0,
+ FFA_MEMORY_REGION_FLAG_CLEAR,
+ FFA_MEMORY_NOT_SPECIFIED_MEM, 0, 0,
+ &total_length, &fragment_length);
+
+ if (remaining_constituent_count != 0) {
+ goto out;
+ }
+
+ switch (mem_func) {
+ case FFA_MEM_LEND_SMC32:
+ ret = ffa_mem_lend(total_length, fragment_length);
+ break;
+ case FFA_MEM_DONATE_SMC32:
+ ret = ffa_mem_donate(total_length, fragment_length);
+ break;
+ default:
+ ERROR("Not expected for func name: %x\n", mem_func);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_DENIED)) {
+ goto out;
+ }
+
+ /* Undelegate to reestablish the same security state for PAS. */
+ ret_rmm = host_rmi_granule_undelegate((u_register_t)delegate_addr);
+
+ for (uint32_t i = 0; i < constituents_count; i++) {
+ uint32_t *ptr = (uint32_t *)constituents[i].address;
+
+ *ptr = 0xFFA;
+ }
+
+ if (get_gpc_abort_triggered()) {
+ ERROR("Exception due to GPC for lend/donate with RME. Not"
+ " expected for this case.\n");
+ result = TEST_RESULT_FAIL;
+ } else {
+ result = TEST_RESULT_SUCCESS;
+ }
+out:
+ unregister_custom_sync_exception_handler();
+
+ if (ret_rmm != 0UL) {
+ INFO("Undelegate operation returns 0x%lx for address %llx\n",
+ ret_rmm, (uint64_t)delegate_addr);
+ return TEST_RESULT_FAIL;
+ }
+
+ return result;
+}
+
+/**
+ * Memory to be shared between partitions is described in a composite, with
+ * various constituents. In an RME system, the memory must be in NS PAS in
+ * operations from NWd to an SP. In case the PAS is not following this
+ * expectation memory lend/donate should fail, and all constituents must
+ * remain in the NS PAS.
+ *
+ * This test validates that if one page in the middle of one of the constituents
+ * is not in the NS PAS the operation fails.
+ */
+test_result_t test_ffa_mem_send_sp_realm_memory(void)
+{
+ test_result_t ret;
+ uint32_t mem_func[] = {FFA_MEM_LEND_SMC32, FFA_MEM_DONATE_SMC32};
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)four_share_pages, 4, 0},
+ {(void *)share_page, 1, 0}
+ };
+
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ for (unsigned j = 0; j < ARRAY_SIZE(mem_func); j++) {
+ for (unsigned int i = 0; i < 4; i++) {
+ /* Address to be delegated to Realm PAS. */
+ uint64_t realm_addr =
+ (uint64_t)&four_share_pages[i * PAGE_SIZE];
+
+ INFO("%s memory with realm addr: %llx\n",
+ mem_func[j] == FFA_MEM_LEND_SMC32
+ ? "Lend"
+ : "Donate",
+ realm_addr);
+
+ ret = test_ffa_mem_send_realm_expect_fail(
+ mem_func[j], SP_ID(1), constituents,
+ constituents_count, realm_addr);
+
+ if (ret != TEST_RESULT_SUCCESS) {
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * Memory to be shared between partitions is described in a composite, with
+ * various constituents. In an RME system, the memory must be in NS PAS in
+ * operations from NWd to an SP. In case the PAS is not following this
+ * expectation memory lend/donate should fail, and all constituents must
+ * remain in the NS PAS.
+ *
+ * This test validates that the memory lend/donate operation fails when
+ * one of the constituents in the composite is not in the NS PAS.
+ */
+test_result_t test_ffa_mem_lend_sp_realm_memory_separate_constituent(void)
+{
+ test_result_t ret;
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)four_share_pages, 4, 0},
+ {(void *)share_page, 1, 0}
+ };
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+ /* Address to be delegated to Realm PAS. */
+ uint64_t realm_addr = (uint64_t)&share_page[0];
+
+ INFO("Sharing memory with realm addr: %llx\n", realm_addr);
+
+ ret = test_ffa_mem_send_realm_expect_fail(
+ FFA_MEM_LEND_SMC32, SP_ID(1), constituents,
+ constituents_count, realm_addr);
+
+ return ret;
}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_notifications.c b/tftf/tests/runtime_services/secure_service/test_ffa_notifications.c
new file mode 100644
index 000000000..9ca337a39
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_notifications.c
@@ -0,0 +1,1564 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <irq.h>
+#include <smccc.h>
+
+#include <arch_helpers.h>
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+#include <ffa_svc.h>
+#include <platform.h>
+#include <spm_common.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+
+/**
+ * Defining variables to test the per-vCPU notifications.
+ * The conceived test follows the same logic, despite the sender receiver type
+ * of endpoint (VM or secure partition).
+ * Using global variables because these need to be accessed in the cpu on handler
+ * function 'request_notification_get_per_vcpu_on_handler'.
+ * In each specific test function, change 'per_vcpu_receiver' and
+ * 'per_vcpu_sender' to make the logic work for:
+ * - NWd to SP;
+ * - SP to NWd;
+ * - SP to SP.
+ */
+static ffa_id_t per_vcpu_receiver;
+static ffa_id_t per_vcpu_sender;
+uint32_t per_vcpu_flags_get;
+static event_t per_vcpu_finished[PLATFORM_CORE_COUNT];
+
+static const struct ffa_uuid expected_sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
+};
+
+static ffa_notification_bitmap_t g_notifications = FFA_NOTIFICATION(0) |
+ FFA_NOTIFICATION(1) |
+ FFA_NOTIFICATION(30) |
+ FFA_NOTIFICATION(50) |
+ FFA_NOTIFICATION(63);
+
+/**
+ * Use FFA_FEATURES to retrieve the ID of:
+ * - Schedule Receiver Interrupt
+ * - Notification Pending Interrupt
+ * - Managed Exit Interrupt
+ * Validate that the call works as expected, and that the returned IDs match
+ * those used in the remainder of the tests.
+ */
+test_result_t test_notifications_retrieve_int_ids(void)
+{
+ struct ffa_value ret;
+
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ /* Check if SPMC is OP-TEE at S-EL1 */
+ if (check_spmc_execution_level()) {
+ /* FFA_FEATURES is not yet supported in OP-TEE */
+ return TEST_RESULT_SUCCESS;
+ }
+
+ ret = ffa_features(FFA_FEATURE_NPI);
+ if (is_ffa_call_error(ret) ||
+ ffa_feature_intid(ret) != NOTIFICATION_PENDING_INTERRUPT_INTID) {
+ ERROR("Failed to retrieved NPI (exp: %u, got: %u)\n",
+ NOTIFICATION_PENDING_INTERRUPT_INTID,
+ ffa_feature_intid(ret));
+
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = ffa_features(FFA_FEATURE_SRI);
+ if (is_ffa_call_error(ret) ||
+ ffa_feature_intid(ret) != FFA_SCHEDULE_RECEIVER_INTERRUPT_ID) {
+ ERROR("Failed to retrieved SRI (exp: %u, got: %u)\n",
+ FFA_SCHEDULE_RECEIVER_INTERRUPT_ID,
+ ffa_feature_intid(ret));
+
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = ffa_features(FFA_FEATURE_MEI);
+ if (is_ffa_call_error(ret) ||
+ ffa_feature_intid(ret) != MANAGED_EXIT_INTERRUPT_ID) {
+ ERROR("Failed to retrieved MEI (exp: %u, got: %u)\n",
+ MANAGED_EXIT_INTERRUPT_ID,
+ ffa_feature_intid(ret));
+
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Helper to create bitmap for NWd VMs.
+ */
+static bool notifications_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
+{
+ VERBOSE("Creating bitmap for VM %x; cpu count: %u.\n",
+ vm_id, vcpu_count);
+ struct ffa_value ret = ffa_notification_bitmap_create(vm_id,
+ vcpu_count);
+
+ return !is_ffa_call_error(ret);
+}
+
+/**
+ * Helper to destroy bitmap for NWd VMs.
+ */
+static bool notifications_bitmap_destroy(ffa_id_t vm_id)
+{
+ VERBOSE("Destroying bitmap of VM %x.\n", vm_id);
+ struct ffa_value ret = ffa_notification_bitmap_destroy(vm_id);
+
+ return !is_ffa_call_error(ret);
+}
+
+/**
+ * Test notifications bitmap create and destroy interfaces.
+ */
+test_result_t test_ffa_notifications_bitmap_create_destroy(void)
+{
+ const ffa_id_t vm_id = VM_ID(1);
+
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ if (check_spmc_execution_level()) {
+ VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (!notifications_bitmap_create(vm_id, PLATFORM_CORE_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!notifications_bitmap_destroy(vm_id)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test notifications bitmap destroy in a case the bitmap hasn't been created.
+ */
+test_result_t test_ffa_notifications_destroy_not_created(void)
+{
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ if (check_spmc_execution_level()) {
+ VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ struct ffa_value ret = ffa_notification_bitmap_destroy(VM_ID(1));
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_DENIED)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test attempt to create notifications bitmap for NWd VM if it had been
+ * already created.
+ */
+test_result_t test_ffa_notifications_create_after_create(void)
+{
+ struct ffa_value ret;
+ const ffa_id_t vm_id = VM_ID(2);
+
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ if (check_spmc_execution_level()) {
+ VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* First successfully create a notifications bitmap */
+ if (!notifications_bitmap_create(vm_id, 1)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Attempt to do the same to the same VM. */
+ ret = ffa_notification_bitmap_create(vm_id, 1);
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_DENIED)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Destroy to not affect other tests */
+ if (!notifications_bitmap_destroy(vm_id)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Helper function to test FFA_NOTIFICATION_BIND interface.
+ * Receives all arguments to use 'cactus_notification_bind_send_cmd', and
+ * expected response for the test command.
+ *
+ * Returns:
+ * - 'true' if response was obtained and it was as expected;
+ * - 'false' if there was an error with use of FFA_MSG_SEND_DIRECT_REQ, or
+ * the obtained response was not as expected.
+ */
+static bool request_notification_bind(
+ ffa_id_t cmd_dest, ffa_id_t receiver, ffa_id_t sender,
+ ffa_notification_bitmap_t notifications, uint32_t flags,
+ uint32_t expected_resp, uint32_t error_code)
+{
+ struct ffa_value ret;
+
+ VERBOSE("TFTF requesting SP to bind notifications!\n");
+
+ ret = cactus_notification_bind_send_cmd(HYP_ID, cmd_dest, receiver,
+ sender, notifications, flags);
+
+ if (!is_expected_cactus_response(ret, expected_resp, error_code)) {
+ ERROR("Failed notifications bind. receiver: %x; sender: %x\n",
+ receiver, sender);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Helper function to test FFA_NOTIFICATION_UNBIND interface.
+ * Receives all arguments to use 'cactus_notification_unbind_send_cmd', and
+ * expected response for the test command.
+ *
+ * Returns:
+ * - 'true' if response was obtained and it was as expected;
+ * - 'false' if there was an error with use of FFA_MSG_SEND_DIRECT_REQ, or
+ * the obtained response was not as expected.
+ */
+static bool request_notification_unbind(
+ ffa_id_t cmd_dest, ffa_id_t receiver, ffa_id_t sender,
+ ffa_notification_bitmap_t notifications, uint32_t expected_resp,
+ uint32_t error_code)
+{
+ struct ffa_value ret;
+
+ VERBOSE("TFTF requesting SP to unbind notifications!\n");
+
+ ret = cactus_notification_unbind_send_cmd(HYP_ID, cmd_dest, receiver,
+ sender, notifications);
+
+ if (!is_expected_cactus_response(ret, expected_resp, error_code)) {
+ ERROR("Failed notifications unbind. receiver: %x; sender: %x\n",
+ receiver, sender);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Test calls from SPs to the bind and unbind interfaces, expecting success
+ * returns.
+ * This test issues a request via direct messaging to the SP, which executes
+ * the test and responds with the result of the call.
+ */
+test_result_t test_ffa_notifications_sp_bind_unbind(void)
+{
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+	/* First bind: SP sender... */
+	if (!request_notification_bind(SP_ID(1), SP_ID(1), SP_ID(2),
+				       g_notifications, 0, CACTUS_SUCCESS, 0)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* ...and NWd sender (ID 1 — presumably a VM ID; confirm with setup). */
+	if (!request_notification_bind(SP_ID(1), SP_ID(1), 1,
+				       g_notifications, 0, CACTUS_SUCCESS, 0)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* ...then unbind using the same arguments. */
+	if (!request_notification_unbind(SP_ID(1), SP_ID(1), SP_ID(2),
+					 g_notifications, CACTUS_SUCCESS, 0)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (!request_notification_unbind(SP_ID(1), SP_ID(1), 1,
+					 g_notifications, CACTUS_SUCCESS, 0)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test successful attempt of doing bind and unbind of the same set of
+ * notifications, with a VM as the receiver.
+ */
+test_result_t test_ffa_notifications_vm_bind_unbind(void)
+{
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+	const ffa_id_t vm_id = VM_ID(1);
+	struct ffa_value ret;
+
+	/* A VM receiver needs its notifications bitmap allocated first. */
+	if (!notifications_bitmap_create(vm_id, 1)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = ffa_notification_bind(SP_ID(2), vm_id, 0, g_notifications);
+
+	if (!is_expected_ffa_return(ret, FFA_SUCCESS_SMC32)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = ffa_notification_unbind(SP_ID(2), vm_id, g_notifications);
+
+	if (!is_expected_ffa_return(ret, FFA_SUCCESS_SMC32)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Release the bitmap so later tests start from a clean state. */
+	if (!notifications_bitmap_destroy(vm_id)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test expected failure of using a NS FF-A ID for the sender.
+ * A VM binding notifications to another VM is invalid.
+ */
+test_result_t test_ffa_notifications_vm_bind_vm(void)
+{
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+	const ffa_id_t vm_id = VM_ID(1);
+	const ffa_id_t sender_id = VM_ID(2);
+	struct ffa_value ret;
+
+	if (!notifications_bitmap_create(vm_id, 1)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Sender is a VM: the SPMC must reject the bind. */
+	ret = ffa_notification_bind(sender_id, vm_id, 0, g_notifications);
+
+	if (!is_expected_ffa_error(ret, FFA_ERROR_INVALID_PARAMETER)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (!notifications_bitmap_destroy(vm_id)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test failure of both bind and unbind in case at least one notification is
+ * already bound to another FF-A endpoint.
+ * Expect error code FFA_ERROR_DENIED.
+ */
+test_result_t test_ffa_notifications_already_bound(void)
+{
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+	/* Bind first to test. */
+	if (!request_notification_bind(SP_ID(1), SP_ID(1), SP_ID(2),
+				       g_notifications, 0, CACTUS_SUCCESS, 0)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Attempt to bind notifications bound in above request. */
+	if (!request_notification_bind(SP_ID(1), SP_ID(1), SP_ID(3),
+				       g_notifications, 0, CACTUS_ERROR,
+				       FFA_ERROR_DENIED)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Attempt to unbind notifications bound in initial request. */
+	if (!request_notification_unbind(SP_ID(1), SP_ID(1), SP_ID(3),
+					 g_notifications, CACTUS_ERROR,
+					 FFA_ERROR_DENIED)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Reset the SP's notifications state. */
+	if (!request_notification_unbind(SP_ID(1), SP_ID(1), SP_ID(2),
+					 g_notifications, CACTUS_SUCCESS, 0)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Try to bind/unbind notifications spoofing the identity of the receiver.
+ * Commands will be sent to SP_ID(1), which will use SP_ID(3) as the receiver.
+ * Expect error code FFA_ERROR_INVALID_PARAMETER.
+ */
+test_result_t test_ffa_notifications_bind_unbind_spoofing(void)
+{
+	ffa_notification_bitmap_t notifications = FFA_NOTIFICATION(8);
+
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+	/* SP_ID(1) issues the call on behalf of SP_ID(3): must be rejected. */
+	if (!request_notification_bind(SP_ID(1), SP_ID(3), SP_ID(2),
+				       notifications, 0, CACTUS_ERROR,
+				       FFA_ERROR_INVALID_PARAMETER)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (!request_notification_unbind(SP_ID(1), SP_ID(3), SP_ID(2),
+					 notifications, CACTUS_ERROR,
+					 FFA_ERROR_INVALID_PARAMETER)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Call FFA_NOTIFICATION_BIND and FFA_NOTIFICATION_UNBIND with the
+ * notifications bitmap zeroed.
+ * Expecting error code FFA_ERROR_INVALID_PARAMETER.
+ */
+test_result_t test_ffa_notifications_bind_unbind_zeroed(void)
+{
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+	if (!request_notification_bind(SP_ID(1), SP_ID(1), SP_ID(2),
+				       0, 0, CACTUS_ERROR,
+				       FFA_ERROR_INVALID_PARAMETER)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (!request_notification_unbind(SP_ID(1), SP_ID(1), SP_ID(2),
+					 0, CACTUS_ERROR,
+					 FFA_ERROR_INVALID_PARAMETER)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Helper function to test FFA_NOTIFICATION_GET interface.
+ * Receives all arguments to use 'cactus_notification_get_send_cmd', and returns
+ * the received response. Depending on the testing scenario, this will allow
+ * to validate if the returned bitmaps are as expected.
+ *
+ * Returns:
+ * - 'true' if response was obtained.
+ * - 'false' if there was an error sending the request.
+ */
+static bool request_notification_get(
+	ffa_id_t cmd_dest, ffa_id_t receiver, uint32_t vcpu_id, uint32_t flags,
+	bool check_npi_handled, struct ffa_value *response)
+{
+	VERBOSE("TFTF requesting SP to get notifications!\n");
+
+	/* Response is returned via 'response' for caller-side validation. */
+	*response = cactus_notification_get_send_cmd(HYP_ID, cmd_dest,
+						     receiver, vcpu_id,
+						     flags, check_npi_handled);
+
+	return is_ffa_direct_response(*response);
+}
+
+/*
+ * Helper to request 'cmd_dest' (an SP) to call FFA_NOTIFICATION_SET as
+ * 'sender' towards 'receiver'. If 'echo_dest' is non-zero the SP is asked to
+ * forward a request there after setting the notifications (used by the
+ * delayed-SRI test). Validates the response against 'exp_resp'/'exp_error'.
+ */
+static bool request_notification_set(
+	ffa_id_t cmd_dest, ffa_id_t receiver, ffa_id_t sender, uint32_t flags,
+	ffa_notification_bitmap_t notifications, ffa_id_t echo_dest,
+	uint32_t exp_resp, int32_t exp_error)
+{
+	struct ffa_value ret;
+
+	VERBOSE("TFTF requesting SP %x (as %x) to set notifications to %x\n",
+		cmd_dest, sender, receiver);
+
+	ret = cactus_notifications_set_send_cmd(HYP_ID, cmd_dest, receiver,
+						sender, flags, notifications,
+						echo_dest);
+
+	if (!is_expected_cactus_response(ret, exp_resp, exp_error)) {
+		ERROR("Failed notifications set. receiver: %x; sender: %x\n",
+		      receiver, sender);
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * Helper to set notification. If sender is VM, the function will call directly
+ * FFA_NOTIFICATION_SET, if it is an SP it will request the SP to set
+ * notifications. In both cases it is expected a successful outcome.
+ */
+static bool notification_set(ffa_id_t receiver, ffa_id_t sender,
+			     uint32_t flags,
+			     ffa_notification_bitmap_t notifications)
+{
+	struct ffa_value ret;
+
+	/* Sender sets notifications to receiver. */
+	if (!IS_SP_ID(sender)) {
+		/* NWd sender: TFTF invokes the ABI directly. */
+		VERBOSE("VM %x Setting notifications %llx to receiver %x\n",
+			sender, notifications, receiver);
+		ret = ffa_notification_set(sender, receiver, flags, notifications);
+
+		if (!is_expected_ffa_return(ret, FFA_SUCCESS_SMC32)) {
+			ERROR("Failed notifications set. receiver: %x; sender: %x\n",
+			      receiver, sender);
+			return false;
+		}
+		return true;
+	}
+
+	/* SWd sender: relay the set through a direct request to the SP. */
+	return request_notification_set(sender, receiver, sender, flags,
+					notifications, 0, CACTUS_SUCCESS, 0);
+}
+
+/**
+ * Check that SP's response to CACTUS_NOTIFICATION_GET_CMD is as expected.
+ * For a VM receiver, validates the raw FFA_NOTIFICATION_GET return instead.
+ */
+static bool is_notifications_get_as_expected(
+	struct ffa_value *ret, uint64_t exp_from_sp, uint64_t exp_from_vm,
+	ffa_id_t receiver)
+{
+	uint64_t from_sp;
+	uint64_t from_vm;
+	bool success_ret;
+
+	/*
+	 * If receiver ID is SP, this is to evaluate the response to test
+	 * command 'CACTUS_NOTIFICATION_GET_CMD'.
+	 */
+	if (IS_SP_ID(receiver)) {
+		success_ret = (cactus_get_response(*ret) == CACTUS_SUCCESS);
+		from_sp = cactus_notifications_get_from_sp(*ret);
+		from_vm = cactus_notifications_get_from_vm(*ret);
+	} else {
+		/*
+		 * Else, this is to evaluate the return of FF-A call:
+		 * ffa_notification_get.
+		 */
+		success_ret = (ffa_func_id(*ret) == FFA_SUCCESS_SMC32);
+		from_sp = ffa_notifications_get_from_sp(*ret);
+		from_vm = ffa_notifications_get_from_vm(*ret);
+	}
+
+	/* Both pending bitmaps must match the expectations exactly. */
+	if (success_ret != true ||
+	    exp_from_sp != from_sp ||
+	    exp_from_vm != from_vm) {
+		VERBOSE("Notifications not as expected:\n"
+			"   from sp: %llx exp: %llx\n"
+			"   from vm: %llx exp: %llx\n",
+			from_sp, exp_from_sp, from_vm, exp_from_vm);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Validate an FFA_NOTIFICATION_INFO_GET return: lists count, the
+ * 'more pending' flag, each list's size, and the packed list of endpoint/vCPU
+ * IDs (compared raw against the registers starting at arg3).
+ */
+static bool is_notifications_info_get_as_expected(
+	struct ffa_value *ret, uint16_t *ids, uint32_t *lists_sizes,
+	const uint32_t max_ids_count, uint32_t lists_count, bool more_pending)
+{
+	if (lists_count != ffa_notifications_info_get_lists_count(*ret) ||
+	    more_pending != ffa_notifications_info_get_more_pending(*ret)) {
+		ERROR("Notification info get not as expected.\n"
+		      "     Lists counts: %u; more pending %u\n",
+		      ffa_notifications_info_get_lists_count(*ret),
+		      ffa_notifications_info_get_more_pending(*ret));
+		dump_ffa_value(*ret);
+		return false;
+	}
+
+	for (uint32_t i = 0; i < lists_count; i++) {
+		/* List indices in the ABI are 1-based, hence 'i + 1'. */
+		uint32_t cur_size =
+				ffa_notifications_info_get_list_size(*ret,
+								     i + 1);
+
+		if (lists_sizes[i] != cur_size) {
+			ERROR("Expected list size[%u] %u != %u\n", i,
+			      lists_sizes[i], cur_size);
+			return false;
+		}
+	}
+
+	/* Compare the IDs list */
+	if (memcmp(&ret->arg3, ids, sizeof(ids[0]) * max_ids_count) != 0) {
+		ERROR("List of IDs not as expected\n");
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * Helper to bind notification and set it.
+ * If receiver is SP it will request SP to perform the bind, else invokes
+ * FFA_NOTIFICATION_BIND.
+ * If Sender is SP it will request it to perform the set, else invokes
+ * FFA_NOTIFICATION_SET.
+ */
+static bool notification_bind_and_set(ffa_id_t sender,
+	ffa_id_t receiver, ffa_notification_bitmap_t notifications, uint32_t flags)
+{
+	struct ffa_value ret;
+	/* Only the per-vCPU flag is meaningful for the bind call. */
+	uint32_t flags_bind = flags & FFA_NOTIFICATIONS_FLAG_PER_VCPU;
+
+	/* Receiver binds notifications to sender. */
+	if (!IS_SP_ID(receiver)) {
+		ret = ffa_notification_bind(sender, receiver,
+					    flags_bind, notifications);
+
+		if (is_ffa_call_error(ret)) {
+			return false;
+		}
+	} else {
+		if (!request_notification_bind(receiver, receiver, sender,
+					       notifications, flags_bind,
+					       CACTUS_SUCCESS,
+					       0)) {
+			return false;
+		}
+	}
+
+	/* Full 'flags' (e.g. delay-SRI, vCPU ID) are used for the set. */
+	return notification_set(receiver, sender, flags, notifications);
+}
+
+/**
+ * Helper to request SP to get the notifications and validate the return.
+ * For a VM receiver, TFTF calls FFA_NOTIFICATION_GET directly instead.
+ */
+static bool notification_get_and_validate(
+	ffa_id_t receiver, ffa_notification_bitmap_t exp_from_sp,
+	ffa_notification_bitmap_t exp_from_vm, uint32_t vcpu_id,
+	uint32_t flags, bool check_npi_handled)
+{
+	struct ffa_value ret;
+
+	/* Receiver gets pending notifications. */
+	if (IS_SP_ID(receiver)) {
+		request_notification_get(receiver, receiver, vcpu_id, flags,
+					 check_npi_handled, &ret);
+	} else {
+		ret = ffa_notification_get(receiver, vcpu_id, flags);
+	}
+
+	return is_notifications_get_as_expected(&ret, exp_from_sp, exp_from_vm,
+						receiver);
+}
+
+/*
+ * Call FFA_NOTIFICATION_INFO_GET and check the returned lists of endpoints
+ * with pending notifications against the caller-provided expectations.
+ */
+static bool notifications_info_get(
+	uint16_t *expected_ids, uint32_t expected_lists_count,
+	uint32_t *expected_lists_sizes, const uint32_t max_ids_count,
+	bool expected_more_pending)
+{
+	struct ffa_value ret;
+
+	VERBOSE("Getting pending notification's info.\n");
+
+	ret = ffa_notification_info_get();
+
+	return !is_ffa_call_error(ret) &&
+		is_notifications_info_get_as_expected(&ret, expected_ids,
+						      expected_lists_sizes,
+						      max_ids_count,
+						      expected_lists_count,
+						      expected_more_pending);
+}
+
+/* Set to 1 by the IRQ handler when the Schedule Receiver Interrupt fires. */
+static volatile int schedule_receiver_interrupt_received;
+
+/* IRQ handler for the SRI; asserts the interrupt was not already pending. */
+static int schedule_receiver_interrupt_handler(void *data)
+{
+	assert(schedule_receiver_interrupt_received == 0);
+	schedule_receiver_interrupt_received = 1;
+	return 0;
+}
+
+/**
+ * Enable the Schedule Receiver Interrupt and register the respective
+ * handler.
+ */
+static void schedule_receiver_interrupt_init(void)
+{
+	tftf_irq_register_handler(FFA_SCHEDULE_RECEIVER_INTERRUPT_ID,
+				  schedule_receiver_interrupt_handler);
+
+	/* 0xA is the interrupt priority — TODO confirm against GIC config. */
+	tftf_irq_enable(FFA_SCHEDULE_RECEIVER_INTERRUPT_ID, 0xA);
+}
+
+/**
+ * Disable the Schedule Receiver Interrupt and unregister the respective
+ * handler. Also clears the 'received' flag for the next test.
+ */
+static void schedule_receiver_interrupt_deinit(void)
+{
+	tftf_irq_disable(FFA_SCHEDULE_RECEIVER_INTERRUPT_ID);
+	tftf_irq_unregister_handler(FFA_SCHEDULE_RECEIVER_INTERRUPT_ID);
+	schedule_receiver_interrupt_received = 0;
+}
+
+/*
+ * Report whether the SRI was handled since the last check, and clear the
+ * flag so each call observes at most one interrupt.
+ */
+bool check_schedule_receiver_interrupt_handled(void)
+{
+	if (schedule_receiver_interrupt_received == 1) {
+		VERBOSE("Schedule Receiver Interrupt handled!\n");
+		schedule_receiver_interrupt_received = 0;
+		return true;
+	}
+	VERBOSE("Schedule Receiver Interrupt NOT handled!\n");
+	return false;
+}
+
+/**
+ * Base function to test notifications signaling with an SP as a receiver.
+ * Binds and sets 'notifications' from 'sender' to 'receiver' (delayed SRI),
+ * checks the SRI was handled, validates FFA_NOTIFICATION_INFO_GET, retrieves
+ * the pending notifications and finally unbinds them.
+ */
+static test_result_t base_test_global_notifications_signal_sp(
+	const ffa_id_t sender, const ffa_id_t receiver,
+	const ffa_notification_bitmap_t notifications, const uint32_t flags_get)
+{
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+	if (!IS_SP_ID(receiver)) {
+		ERROR("Receiver is expected to be an SP ID!\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Variables to validate calls to FFA_NOTIFICATION_INFO_GET. */
+	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+	uint32_t lists_count;
+	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+
+	schedule_receiver_interrupt_init();
+
+	if (!notification_bind_and_set(sender, receiver, notifications,
+				       FFA_NOTIFICATIONS_FLAG_DELAY_SRI)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (!check_schedule_receiver_interrupt_handled()) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Simple list of IDs expected on return from FFA_NOTIFICATION_INFO_GET.
+	 */
+	ids[0] = receiver;
+	lists_count = 1;
+
+	if (!notifications_info_get(ids, lists_count, lists_sizes,
+				    FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+				    false)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* The pending bitmap depends on whether the sender is an SP or a VM. */
+	if (!notification_get_and_validate(
+		receiver, IS_SP_ID(sender) ? notifications : 0,
+		!IS_SP_ID(sender) ? notifications : 0, 0, flags_get, true)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (!request_notification_unbind(receiver, receiver, sender,
+					 notifications, CACTUS_SUCCESS, 0)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	schedule_receiver_interrupt_deinit();
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test to validate a VM can signal an SP.
+ * Sender '1' is a NWd endpoint ID — presumably VM 1; confirm against setup.
+ */
+test_result_t test_ffa_notifications_vm_signals_sp(void)
+{
+	return base_test_global_notifications_signal_sp(
+		1, SP_ID(1), FFA_NOTIFICATION(1) | FFA_NOTIFICATION(60),
+		FFA_NOTIFICATIONS_FLAG_BITMAP_VM);
+}
+
+/**
+ * Test to validate an SP can signal an SP.
+ */
+test_result_t test_ffa_notifications_sp_signals_sp(void)
+{
+	return base_test_global_notifications_signal_sp(
+		SP_ID(1), SP_ID(2), g_notifications,
+		FFA_NOTIFICATIONS_FLAG_BITMAP_SP);
+}
+
+/**
+ * Test to validate an SP can signal a VM.
+ * On failure the test keeps going so all clean-up steps still run, and the
+ * aggregated 'result' is returned at the end.
+ */
+test_result_t test_ffa_notifications_sp_signals_vm(void)
+{
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+	const ffa_id_t sender = SP_ID(1);
+	const ffa_id_t receiver = VM_ID(1);
+	uint32_t get_flags = FFA_NOTIFICATIONS_FLAG_BITMAP_SP;
+	struct ffa_value ret;
+	test_result_t result = TEST_RESULT_SUCCESS;
+
+	/* Variables to validate calls to FFA_NOTIFICATION_INFO_GET. */
+	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+	uint32_t lists_count;
+	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+
+	/* Ask SPMC to allocate notifications bitmap. */
+	if (!notifications_bitmap_create(receiver, 1)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	schedule_receiver_interrupt_init();
+
+	/* Request receiver to bind a set of notifications to the sender. */
+	if (!notification_bind_and_set(sender, receiver, g_notifications,
+				       FFA_NOTIFICATIONS_FLAG_DELAY_SRI)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	if (!check_schedule_receiver_interrupt_handled()) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * FFA_NOTIFICATION_INFO_GET return list should be simple, containing
+	 * only the receiver's ID.
+	 */
+	ids[0] = receiver;
+	lists_count = 1;
+
+	if (!notifications_info_get(ids, lists_count, lists_sizes,
+				    FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+				    false)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Get pending notifications, and retrieve response. */
+	if (!notification_get_and_validate(receiver, g_notifications, 0, 0,
+					   get_flags, false)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Clean-up: unbind and destroy the receiver's bitmap. */
+	ret = ffa_notification_unbind(sender, receiver, g_notifications);
+
+	if (!is_expected_ffa_return(ret, FFA_SUCCESS_SMC32)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	if (!notifications_bitmap_destroy(receiver)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	schedule_receiver_interrupt_deinit();
+
+	return result;
+}
+
+/**
+ * Test to validate it is not possible to unbind a pending notification.
+ */
+test_result_t test_ffa_notifications_unbind_pending(void)
+{
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+	const ffa_id_t receiver = SP_ID(1);
+	const ffa_id_t sender = VM_ID(1);
+	const ffa_notification_bitmap_t notifications = FFA_NOTIFICATION(30) |
+							FFA_NOTIFICATION(35);
+	uint32_t get_flags = FFA_NOTIFICATIONS_FLAG_BITMAP_VM;
+
+	schedule_receiver_interrupt_init();
+
+	/* Request receiver to bind a set of notifications to the sender. */
+	if (!notification_bind_and_set(sender, receiver, notifications, 0)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Attempt to unbind the pending notification, but expect error return
+	 * given the notification is pending.
+	 */
+	if (!request_notification_unbind(receiver, receiver, sender,
+					 FFA_NOTIFICATION(30),
+					 CACTUS_ERROR, FFA_ERROR_DENIED)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (!check_schedule_receiver_interrupt_handled()) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Request receiver partition to get pending notifications from VMs.
+	 * Both notifications (30 and 35) are expected to be pending.
+	 */
+	if (!notification_get_and_validate(receiver, 0, notifications, 0,
+					   get_flags, false)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Unbind all notifications, to not interfere with other tests. */
+	if (!request_notification_unbind(receiver, receiver, sender,
+					 notifications, CACTUS_SUCCESS, 0)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	schedule_receiver_interrupt_deinit();
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test the result of a call to FFA_NOTIFICATION_INFO_GET if no pending
+ * notifications. Expect FFA_ERROR_NO_DATA.
+ */
+test_result_t test_ffa_notifications_info_get_none(void)
+{
+	SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+	/* OP-TEE's S-EL1 SPMC does not implement this ABI. */
+	if (check_spmc_execution_level()) {
+		VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	struct ffa_value ret;
+
+	ret = ffa_notification_info_get();
+
+	if (!is_expected_ffa_error(ret, FFA_ERROR_NO_DATA)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * CPU_ON handler for testing per-vCPU notifications to SPs (either from VMs
+ * or from SPs). It requests the SP to retrieve its pending notifications
+ * within its current Execution Context. The SP shall obtain all per-vCPU
+ * targeted to the running vCPU.
+ */
+static test_result_t request_notification_get_per_vcpu_on_handler(void)
+{
+	unsigned int core_pos = get_current_core_id();
+	test_result_t result = TEST_RESULT_FAIL;
+
+	uint64_t exp_from_vm = 0;
+	uint64_t exp_from_sp = 0;
+
+	/* Each core expects the notification whose ID equals its position. */
+	if (IS_SP_ID(per_vcpu_sender)) {
+		exp_from_sp = FFA_NOTIFICATION(core_pos);
+	} else {
+		exp_from_vm = FFA_NOTIFICATION(core_pos);
+	}
+
+	VERBOSE("Request get per-vCPU notification to %x, core: %u.\n",
+		per_vcpu_receiver, core_pos);
+
+	/*
+	 * Secure Partitions secondary ECs need one round of ffa_run to reach
+	 * the message loop.
+	 */
+	if (!spm_core_sp_init(per_vcpu_receiver)) {
+		goto out;
+	}
+
+	/*
+	 * Request to get notifications sent to the respective vCPU.
+	 * Check also if NPI was handled by the receiver. It should have been
+	 * pended at notifications set, in the respective vCPU.
+	 */
+	if (!notification_get_and_validate(
+		per_vcpu_receiver, exp_from_sp, exp_from_vm, core_pos,
+		per_vcpu_flags_get, true)) {
+		goto out;
+	}
+
+	result = TEST_RESULT_SUCCESS;
+
+out:
+	/* Tell the lead CPU that the calling CPU has completed the test. */
+	tftf_send_event(&per_vcpu_finished[core_pos]);
+
+	return result;
+}
+
+/*
+ * Per-CPU routine meant to enable/disable the NPI in the receiver SP's EC.
+ * NOTE(review): 'enable' is currently unused and no enable/disable request is
+ * sent to the SP — only spm_core_sp_init() runs. Confirm whether a cactus
+ * command to toggle the NPI is missing here.
+ */
+static test_result_t base_npi_enable_per_cpu(bool enable)
+{
+	test_result_t result = TEST_RESULT_FAIL;
+	uint32_t core_pos = get_current_core_id();
+
+	VERBOSE("Request SP %x to enable NPI in core %u\n",
+		per_vcpu_receiver, core_pos);
+
+	/*
+	 * Secure Partitions secondary ECs need one round of ffa_run to reach
+	 * the message loop.
+	 */
+	if (!spm_core_sp_init(per_vcpu_receiver)) {
+		goto out;
+	}
+
+	result = TEST_RESULT_SUCCESS;
+
+out:
+	/* Tell the lead CPU that the calling CPU has completed the test. */
+	tftf_send_event(&per_vcpu_finished[core_pos]);
+
+	return result;
+}
+
+/* CPU_ON handler: enable the NPI on the calling core. */
+static test_result_t npi_enable_per_vcpu_on_handler(void)
+{
+	return base_npi_enable_per_cpu(true);
+}
+
+/* CPU_ON handler: disable the NPI on the calling core. */
+static test_result_t npi_disable_per_vcpu_on_handler(void)
+{
+	return base_npi_enable_per_cpu(false);
+}
+/**
+ * Base function to test signaling of per-vCPU notifications.
+ * Test whole flow between two FF-A endpoints: binding, getting notification
+ * info, and getting pending notifications.
+ * Each vCPU will receive a notification whose ID is the same as the core
+ * position.
+ */
+static test_result_t base_test_per_vcpu_notifications(ffa_id_t sender,
+						      ffa_id_t receiver)
+{
+	/*
+	 * Manually set variables to validate what should be the return of to
+	 * FFA_NOTIFICATION_INFO_GET. Layout assumes 8 cores grouped into
+	 * 3 lists of sizes 3/3/2 — TODO confirm against PLATFORM_CORE_COUNT.
+	 */
+	uint16_t exp_ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {
+		receiver, 0, 1, 2,
+		receiver, 3, 4, 5,
+		receiver, 6, 7, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+	};
+	uint32_t exp_lists_count = 3;
+	uint32_t exp_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {
+		3, 3, 2, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	};
+
+	const bool exp_more_notif_pending = false;
+	test_result_t result = TEST_RESULT_SUCCESS;
+	uint64_t notifications_to_unbind = 0;
+
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+	per_vcpu_flags_get = IS_SP_ID(sender)
+				? FFA_NOTIFICATIONS_FLAG_BITMAP_SP
+				: FFA_NOTIFICATIONS_FLAG_BITMAP_VM;
+
+	/* Setting global variables to be accessed by the cpu_on handler. */
+	per_vcpu_receiver = receiver;
+	per_vcpu_sender = sender;
+
+	/* Boot all cores and enable the NPI in all of them. */
+	if (spm_run_multi_core_test(
+		(uintptr_t)npi_enable_per_vcpu_on_handler,
+		per_vcpu_finished) != TEST_RESULT_SUCCESS) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Prepare notifications bitmap to request Cactus to bind them as
+	 * per-vCPU. Note: an early return here skips the unbind clean-up.
+	 */
+	for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+		notifications_to_unbind |= FFA_NOTIFICATION(i);
+
+		uint32_t flags = FFA_NOTIFICATIONS_FLAG_DELAY_SRI |
+				 FFA_NOTIFICATIONS_FLAG_PER_VCPU   |
+				 FFA_NOTIFICATIONS_FLAGS_VCPU_ID((uint16_t)i);
+
+		if (!notification_bind_and_set(sender,
+					       receiver,
+					       FFA_NOTIFICATION(i),
+					       flags)) {
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Call FFA_NOTIFICATION_INFO_GET and validate return. */
+	if (!notifications_info_get(exp_ids, exp_lists_count, exp_lists_sizes,
+				    FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+				    exp_more_notif_pending)) {
+		ERROR("Info Get Failed....\n");
+		result = TEST_RESULT_FAIL;
+		goto out;
+	}
+
+	/*
+	 * Request SP to get notifications in core 0, as this is not iterated
+	 * at the CPU ON handler.
+	 * Set `check_npi_handled` to true, as the receiver is supposed to be
+	 * preempted by the NPI.
+	 */
+	if (!notification_get_and_validate(
+		receiver, IS_SP_ID(sender) ? FFA_NOTIFICATION(0) : 0,
+		!IS_SP_ID(sender) ? FFA_NOTIFICATION(0) : 0, 0,
+		per_vcpu_flags_get, true)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Bring up all the cores, and request the receiver to get notifications
+	 * in each one of them.
+	 */
+	if (spm_run_multi_core_test(
+		(uintptr_t)request_notification_get_per_vcpu_on_handler,
+		per_vcpu_finished) != TEST_RESULT_SUCCESS) {
+		result = TEST_RESULT_FAIL;
+	}
+
+out:
+	/* As a clean-up, unbind notifications. */
+	if (!request_notification_unbind(receiver, receiver,
+					 sender,
+					 notifications_to_unbind,
+					 CACTUS_SUCCESS, 0)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Boot all cores and DISABLE the NPI in all of them. */
+	if (spm_run_multi_core_test(
+		(uintptr_t)npi_disable_per_vcpu_on_handler,
+		per_vcpu_finished) != TEST_RESULT_SUCCESS) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return result;
+}
+
+/**
+ * Test to validate a VM can signal a per-vCPU notification to an SP.
+ * Sender '0' is a NWd endpoint ID — TODO confirm it maps to the TFTF driver.
+ */
+test_result_t test_ffa_notifications_vm_signals_sp_per_vcpu(void)
+{
+	return base_test_per_vcpu_notifications(0, SP_ID(1));
+}
+
+/**
+ * Test to validate an SP can signal a per-vCPU notification to an SP.
+ */
+test_result_t test_ffa_notifications_sp_signals_sp_per_vcpu(void)
+{
+	return base_test_per_vcpu_notifications(SP_ID(1), SP_ID(2));
+}
+
+/*
+ * CPU_ON handler: the NWd receiver (VM) gets its per-vCPU notification on the
+ * calling core. Expects exactly the notification whose ID equals the core
+ * position, sent from an SP.
+ */
+static test_result_t notification_get_per_vcpu_on_handler(void)
+{
+	unsigned int core_pos = get_current_core_id();
+	test_result_t result = TEST_RESULT_SUCCESS;
+
+	VERBOSE("Getting per-vCPU notifications from %x, core: %u.\n",
+		per_vcpu_receiver, core_pos);
+
+	/* Sender SP's secondary EC needs a round of ffa_run first. */
+	if (!spm_core_sp_init(per_vcpu_sender)) {
+		goto out;
+	}
+
+	if (!notification_get_and_validate(per_vcpu_receiver,
+					   FFA_NOTIFICATION(core_pos), 0,
+					   core_pos,
+					   FFA_NOTIFICATIONS_FLAG_BITMAP_SP,
+					   false)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+out:
+	/* Tell the lead CPU that the calling CPU has completed the test. */
+	tftf_send_event(&per_vcpu_finished[core_pos]);
+
+	return result;
+}
+
+/**
+ * Test whole flow from binding, to getting notifications' info, and getting
+ * pending notifications, namely signaling of notifications from SP to a VM.
+ * Each vCPU will receive a notification whose ID is the same as the core
+ * position.
+ */
+test_result_t test_ffa_notifications_sp_signals_vm_per_vcpu(void)
+{
+	/* Making a VM the receiver, and an SP the sender */
+	per_vcpu_receiver = VM_ID(1);
+	per_vcpu_sender = SP_ID(2);
+
+	/*
+	 * Manually set variables to validate what should be the return of to
+	 * FFA_NOTIFICATION_INFO_GET.
+	 */
+	uint16_t exp_ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {
+		per_vcpu_receiver, 0, 1, 2,
+		per_vcpu_receiver, 3, 4, 5,
+		per_vcpu_receiver, 6, 7, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+	};
+	uint32_t exp_lists_count = 3;
+	uint32_t exp_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {
+		3, 3, 2, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	};
+
+	const bool exp_more_notif_pending = false;
+	test_result_t result = TEST_RESULT_SUCCESS;
+	uint64_t notifications_to_unbind = 0;
+	struct ffa_value ret;
+
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+	/* Create bitmap for receiver. */
+	if (!notifications_bitmap_create(per_vcpu_receiver,
+					 PLATFORM_CORE_COUNT)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Bind notifications, and request Cactus SP to set them. */
+	for (uint32_t i = 0U; i < PLATFORM_CORE_COUNT; i++) {
+		notifications_to_unbind |= FFA_NOTIFICATION(i);
+
+		uint32_t flags = FFA_NOTIFICATIONS_FLAG_DELAY_SRI |
+				 FFA_NOTIFICATIONS_FLAG_PER_VCPU   |
+				 FFA_NOTIFICATIONS_FLAGS_VCPU_ID((uint16_t)i);
+
+		if (!notification_bind_and_set(per_vcpu_sender,
+					       per_vcpu_receiver,
+					       FFA_NOTIFICATION(i),
+					       flags)) {
+			result = TEST_RESULT_FAIL;
+			goto out;
+		}
+	}
+
+	/* Call FFA_NOTIFICATION_INFO_GET and validate return. */
+	if (!notifications_info_get(exp_ids, exp_lists_count, exp_lists_sizes,
+				    FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+				    exp_more_notif_pending)) {
+		ERROR("Info Get Failed....\n");
+		result = TEST_RESULT_FAIL;
+		goto out;
+	}
+
+	/*
+	 * Get notifications in core 0, as it is not iterated at the CPU ON
+	 * handler.
+	 */
+	if (!notification_get_and_validate(per_vcpu_receiver,
+					   FFA_NOTIFICATION(0), 0, 0,
+					   FFA_NOTIFICATIONS_FLAG_BITMAP_SP,
+					   false)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Bring up all the cores, and get notifications in each one of them. */
+	if (spm_run_multi_core_test(
+		(uintptr_t)notification_get_per_vcpu_on_handler,
+		per_vcpu_finished) != TEST_RESULT_SUCCESS) {
+		ERROR("Failed to get per-vCPU notifications\n");
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* As a clean-up, unbind notifications. */
+	ret = ffa_notification_unbind(per_vcpu_sender, per_vcpu_receiver,
+				      notifications_to_unbind);
+	if (is_ffa_call_error(ret)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+out:
+	/*
+	 * Destroy the receiver's bitmap created at the start of the test, so
+	 * the SPMC state does not leak into subsequent tests (mirrors
+	 * test_ffa_notifications_sp_signals_vm).
+	 */
+	if (!notifications_bitmap_destroy(per_vcpu_receiver)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	return result;
+}
+
+/**
+ * Test to validate behavior in SWd if the SRI is not delayed. If the
+ * notification setter handled a managed exit it is indicative the SRI was
+ * sent immediately.
+ */
+test_result_t test_ffa_notifications_sp_signals_sp_immediate_sri(void)
+{
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+	const ffa_id_t sender = SP_ID(1);
+	const ffa_id_t receiver = SP_ID(2);
+	uint32_t get_flags = FFA_NOTIFICATIONS_FLAG_BITMAP_SP;
+	struct ffa_value ret;
+	test_result_t result = TEST_RESULT_SUCCESS;
+
+	/* Variables to validate calls to FFA_NOTIFICATION_INFO_GET. */
+	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+	uint32_t lists_count;
+	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+
+	ids[0] = receiver;
+	lists_count = 1;
+
+	schedule_receiver_interrupt_init();
+
+	/* Request receiver to bind a set of notifications to the sender. */
+	if (!request_notification_bind(receiver, receiver, sender,
+				       g_notifications, 0, CACTUS_SUCCESS, 0)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Request sender to set notification, and expect the response is
+	 * MANAGED_EXIT_INTERRUPT_ID.
+	 */
+	if (!request_notification_set(sender, receiver, sender, 0,
+				      g_notifications, 0,
+				      MANAGED_EXIT_INTERRUPT_ID, 0)) {
+		ERROR("SRI not handled immediately!\n");
+		result = TEST_RESULT_FAIL;
+	} else {
+		VERBOSE("SP %x did a managed exit.\n", sender);
+	}
+
+	if (!check_schedule_receiver_interrupt_handled()) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Call FFA_NOTIFICATION_INFO_GET and validate return. */
+	if (!notifications_info_get(ids, lists_count, lists_sizes,
+				    FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+				    false)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Validate notification get. */
+	if (!request_notification_get(receiver, receiver, 0, get_flags, false, &ret) ||
+	    !is_notifications_get_as_expected(&ret, g_notifications, 0,
+					      receiver)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Resume setter Cactus in the handling of CACTUS_NOTIFICATIONS_SET_CMD.
+	 */
+	ret = cactus_resume_after_managed_exit(HYP_ID, sender);
+
+	/* Expected result to CACTUS_NOTIFICATIONS_SET_CMD. */
+	if (!is_expected_cactus_response(ret, CACTUS_SUCCESS, 0)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Unbind for clean-up. */
+	if (!request_notification_unbind(receiver, receiver, sender,
+					 g_notifications, CACTUS_SUCCESS, 0)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	schedule_receiver_interrupt_deinit();
+
+	return result;
+}
+
+/**
+ * Test to validate behavior in SWd if the SRI is delayed.
+ * The sender sets notifications with the delay-SRI flag and then sends a
+ * request to 'echo_dest'; if echo_dest's command count incremented, the
+ * sender was not preempted right after FFA_NOTIFICATION_SET, proving the
+ * SRI was delayed until the next world switch.
+ */
+test_result_t test_ffa_notifications_sp_signals_sp_delayed_sri(void)
+{
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+	const ffa_id_t sender = SP_ID(3);
+	const ffa_id_t receiver = SP_ID(2);
+	const ffa_id_t echo_dest = SP_ID(1);
+	uint32_t echo_dest_cmd_count = 0;
+	uint32_t get_flags = FFA_NOTIFICATIONS_FLAG_BITMAP_SP;
+	struct ffa_value ret;
+	test_result_t result = TEST_RESULT_SUCCESS;
+
+	/* Variables to validate calls to FFA_NOTIFICATION_INFO_GET. */
+	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+	uint32_t lists_count;
+	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+
+	ids[0] = receiver;
+	lists_count = 1;
+
+	schedule_receiver_interrupt_init();
+
+	/* Request receiver to bind a set of notifications to the sender. */
+	if (!request_notification_bind(receiver, receiver, sender,
+				       g_notifications, 0, CACTUS_SUCCESS, 0)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	ret = cactus_get_req_count_send_cmd(HYP_ID, echo_dest);
+
+	if (cactus_get_response(ret) == CACTUS_SUCCESS) {
+		/*
+		 * Save the command count from the echo_dest, to validate it
+		 * has been incremented after the request to set notifications.
+		 */
+		echo_dest_cmd_count = cactus_get_req_count(ret);
+		VERBOSE("Partition %x command count %u.\n", echo_dest,
+			echo_dest_cmd_count);
+	} else {
+		VERBOSE("Failed to get cmds count from %x\n", echo_dest);
+		result = TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Request sender to set notification with Delay SRI flag, and specify
+	 * echo destination.
+	 */
+	if (!request_notification_set(sender, receiver, sender,
+				      FFA_NOTIFICATIONS_FLAG_DELAY_SRI,
+				      g_notifications, echo_dest,
+				      CACTUS_SUCCESS, 0)) {
+		VERBOSE("Failed to set notifications!\n");
+		result = TEST_RESULT_FAIL;
+	}
+
+	if (!check_schedule_receiver_interrupt_handled()) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Get command count again from echo_dest, to validate that it has been
+	 * incremented by one. This should indicate the notification setter has
+	 * issued a request to echo_dest right after the notification set, thus
+	 * proving the SRI hasn't been sent right after FFA_NOTIFICATION_SET.
+	 */
+	ret = cactus_get_req_count_send_cmd(HYP_ID, echo_dest);
+	if (cactus_get_response(ret) == CACTUS_SUCCESS) {
+		if (cactus_get_req_count(ret) == echo_dest_cmd_count + 1) {
+			VERBOSE("SRI successfully delayed.\n");
+		} else {
+			/* Count obtained but not incremented: SRI was early. */
+			VERBOSE("Command count of %x not incremented.\n",
+				echo_dest);
+			result = TEST_RESULT_FAIL;
+		}
+	} else {
+		VERBOSE("Failed to get cmds count from %x\n", echo_dest);
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Call FFA_NOTIFICATION_INFO_GET and validate return. */
+	if (!notifications_info_get(ids, lists_count, lists_sizes,
+				    FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+				    false)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Validate notification get. */
+	if (!request_notification_get(receiver, receiver, 0, get_flags, false, &ret) ||
+	    !is_notifications_get_as_expected(&ret, g_notifications, 0,
+					      receiver)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Unbind for clean-up. */
+	if (!request_notification_unbind(receiver, receiver, sender,
+					 g_notifications, CACTUS_SUCCESS, 0)) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	schedule_receiver_interrupt_deinit();
+
+	return result;
+}
+
+test_result_t notifications_set_per_vcpu_on_handler(void)
+{
+ unsigned int core_pos = get_current_core_id();
+ test_result_t result = TEST_RESULT_FAIL;
+
+ if (!spm_core_sp_init(per_vcpu_sender)) {
+ goto out;
+ }
+
+ if (!notification_set(per_vcpu_receiver, per_vcpu_sender,
+ FFA_NOTIFICATIONS_FLAG_DELAY_SRI |
+ FFA_NOTIFICATIONS_FLAG_PER_VCPU |
+ FFA_NOTIFICATIONS_FLAGS_VCPU_ID(0),
+ FFA_NOTIFICATION(core_pos))) {
+ goto out;
+ }
+
+ result = TEST_RESULT_SUCCESS;
+
+out:
+ /* Tell the lead CPU that the calling CPU has completed the test. */
+ tftf_send_event(&per_vcpu_finished[core_pos]);
+
+ return result;
+}
+
+test_result_t test_ffa_notifications_mp_sp_signals_up_sp(void)
+{
+ ffa_notification_bitmap_t to_bind = 0;
+
+ /* prepare info get variables. */
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Setting per-vCPU sender and receiver IDs. */
+ per_vcpu_sender = SP_ID(2); /* MP SP */
+ per_vcpu_receiver = SP_ID(3); /* UP SP */
+
+ schedule_receiver_interrupt_init();
+
+ /* Prepare notifications bitmap to have one bit platform core. */
+ for (uint32_t i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ to_bind |= FFA_NOTIFICATION(i);
+ }
+
+ /* Request receiver to bind a set of notifications to the sender. */
+ if (!request_notification_bind(per_vcpu_receiver, per_vcpu_receiver,
+ per_vcpu_sender, to_bind,
+ FFA_NOTIFICATIONS_FLAG_PER_VCPU,
+ CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Boot up system, and then request sender to signal notification from
+ * every core into receiver's only vCPU. Delayed SRI.
+ */
+ if (!notification_set(per_vcpu_receiver, per_vcpu_sender,
+ FFA_NOTIFICATIONS_FLAG_DELAY_SRI |
+ FFA_NOTIFICATIONS_FLAG_PER_VCPU |
+ FFA_NOTIFICATIONS_FLAGS_VCPU_ID(0),
+ FFA_NOTIFICATION(0))) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (spm_run_multi_core_test(
+ (uintptr_t)notifications_set_per_vcpu_on_handler,
+ per_vcpu_finished) != TEST_RESULT_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!check_schedule_receiver_interrupt_handled()) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!notification_get_and_validate(per_vcpu_receiver, to_bind, 0, 0,
+ FFA_NOTIFICATIONS_FLAG_BITMAP_SP, true)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Request unbind. */
+ if (!request_notification_unbind(per_vcpu_receiver, per_vcpu_receiver,
+ per_vcpu_sender, to_bind,
+ CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ schedule_receiver_interrupt_deinit();
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_rxtx_map.c b/tftf/tests/runtime_services/secure_service/test_ffa_rxtx_map.c
deleted file mode 100644
index 1b47c5f99..000000000
--- a/tftf/tests/runtime_services/secure_service/test_ffa_rxtx_map.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <debug.h>
-
-#include <test_helpers.h>
-#include <xlat_tables_defs.h>
-
-static struct mailbox_buffers mb;
-
-static test_result_t test_ffa_rxtx_map(uint32_t expected_return)
-{
- smc_ret_values ret;
-
- /**********************************************************************
- * Verify that FFA is there and that it has the correct version.
- **********************************************************************/
- SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
-
- /**********************************************************************
- * If OP-TEE is SPMC skip this test.
- **********************************************************************/
- if (check_spmc_execution_level()) {
- VERBOSE("OP-TEE as SPMC at S-EL1. Skipping test!\n");
- return TEST_RESULT_SKIPPED;
- }
-
- /*
- * Declare RXTX buffers, assign them to the mailbox and call
- * FFA_RXTX_MAP.
- */
- CONFIGURE_AND_MAP_MAILBOX(mb, PAGE_SIZE, ret);
- if (ffa_func_id(ret) != expected_return) {
- ERROR("Failed to map RXTX buffers %x!\n", ffa_error_code(ret));
- return TEST_RESULT_FAIL;
- }
-
- return TEST_RESULT_SUCCESS;
-}
-
-/**
- * Test mapping RXTX buffers from NWd.
- * This test also sets the Mailbox for other SPM related tests that need to use
- * RXTX buffers.
- */
-test_result_t test_ffa_rxtx_map_success(void)
-{
- test_result_t ret = test_ffa_rxtx_map(FFA_SUCCESS_SMC32);
-
- if (ret == TEST_RESULT_SUCCESS) {
- INFO("Set RXTX Mailbox for remaining spm tests!\n");
- set_tftf_mailbox(&mb);
- }
- return ret;
-}
-
-/**
- * Test to verify that 2nd call to FFA_RXTX_MAP should fail.
- */
-test_result_t test_ffa_rxtx_map_fail(void)
-{
- INFO("This test expects error log.\n");
- return test_ffa_rxtx_map(FFA_ERROR);
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_secure_interrupts.c b/tftf/tests/runtime_services/secure_service/test_ffa_secure_interrupts.c
new file mode 100644
index 000000000..40e52c94a
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_secure_interrupts.c
@@ -0,0 +1,518 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <mmio.h>
+#include <spm_common.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+#include <timer.h>
+
+#include <drivers/arm/arm_gic.h>
+
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+#define RECEIVER_2 SP_ID(2)
+#define SP_SLEEP_TIME 1000U
+#define NS_TIME_SLEEP 1500U
+#define ECHO_VAL1 U(0xa0a0a0a0)
+
+static const struct ffa_uuid expected_sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}
+ };
+
+/*
+ * @Test_Aim@ Test secure interrupt handling while first Secure Partition is
+ * in RUNNING state.
+ *
+ * 1. Send a direct message request command to first Cactus SP to start the
+ * trusted watchdog timer.
+ *
+ * 2. Send a command to SP to first sleep( by executing a busy loop), then
+ * restart trusted watchdog timer and then sleep again.
+ *
+ * 3. While SP is running the first busy loop, Secure interrupt should trigger
+ * during this time.
+ *
+ * 4. The interrupt will be trapped to SPM as IRQ. SPM will inject the virtual
+ * IRQ to the first SP through vIRQ conduit and perform eret to resume
+ * execution in SP.
+ *
+ * 5. Execution traps to irq handler of Cactus SP. It will handle the secure
+ * interrupt triggered by the trusted watchdog timer.
+ *
+ * 6. Cactus SP will perform End-Of-Interrupt and resume execution in the busy
+ * loop.
+ *
+ * 7. Trusted watchdog timer will trigger once again followed by steps 4 to 6.
+ *
+ * 8. Cactus SP will send a direct response message with the elapsed time back
+ * to the normal world.
+ *
+ * 9. We make sure the time elapsed in the sleep routine by SP is not less than
+ * the requested value.
+ *
+ * 10. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 11. Further, TFTF expects SP to return the ID of Trusted Watchdog timer
+ * interrupt through a direct response message.
+ *
+ * 12. Test finishes successfully once the TFTF disables the trusted watchdog
+ * interrupt through a direct message request command.
+ *
+ */
+
+test_result_t test_ffa_sec_interrupt_sp_running(void)
+{
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Enable trusted watchdog interrupt as IRQ in the secure side. */
+ if (!enable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_send_twdog_cmd(SENDER, RECEIVER, 50);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for starting TWDOG timer\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Send request to first Cactus SP to sleep */
+ ret_values = cactus_sleep_trigger_wdog_cmd(SENDER, RECEIVER, SP_SLEEP_TIME, 50);
+
+ /*
+ * Secure interrupt should trigger during this time, Cactus
+ * will handle the trusted watchdog timer.
+ */
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for sleep command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ VERBOSE("Secure interrupt has preempted execution: %u\n",
+ cactus_get_response(ret_values));
+
+ /* Make sure elapsed time not less than sleep time */
+ if (cactus_get_response(ret_values) < SP_SLEEP_TIME) {
+ ERROR("Lapsed time less than requested sleep time\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure Trusted Watchdog timer interrupt was serviced*/
+ if (cactus_get_response(ret_values) != IRQ_TWDOG_INTID) {
+ ERROR("Trusted watchdog timer interrupt not serviced by SP\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Disable Trusted Watchdog interrupt. */
+ if (!disable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test secure interrupt handling while Secure Partition is waiting
+ * for a message.
+ *
+ * 1. Send a direct message request command to first Cactus SP to start the
+ * trusted watchdog timer.
+ *
+ * 2. Once the SP returns with a direct response message, it moves to WAITING
+ state.
+ *
+ * 3. Execute a busy loop to sleep for NS_TIME_SLEEP ms.
+ *
+ * 4. Trusted watchdog timer expires during this time which leads to secure
+ * interrupt being triggered while cpu is executing in normal world.
+ *
+ * 5. The interrupt is trapped to BL31/SPMD as FIQ and later synchronously
+ * delivered to SPM.
+ *
+ * 6. SPM injects a virtual IRQ to first Cactus Secure Partition.
+ *
+ * 7. Once the SP handles the interrupt, it returns execution back to normal
+ * world using FFA_MSG_WAIT call.
+ *
+ * 8. SPM, through the help of SPMD, resumes execution in normal world to
+ * continue the busy loop.
+ *
+ * 9. We make sure the time elapsed in the sleep routine is not less than
+ * the requested value.
+ *
+ * 10. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 11. Further, TFTF expects SP to return the ID of Trusted Watchdog timer
+ * interrupt through a direct response message.
+ *
+ * 12. Test finishes successfully once the TFTF disables the trusted watchdog
+ * interrupt through a direct message request command.
+ *
+ */
+test_result_t test_ffa_sec_interrupt_sp_waiting(void)
+{
+ uint64_t time1;
+ volatile uint64_t time2, time_lapsed;
+ uint64_t timer_freq = read_cntfrq_el0();
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Enable trusted watchdog interrupt as IRQ in the secure side. */
+ if (!enable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send a message to SP1 through direct messaging.
+ */
+ ret_values = cactus_send_twdog_cmd(SENDER, RECEIVER, 100);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for starting TWDOG timer\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ time1 = syscounter_read();
+
+ /*
+ * Sleep for NS_TIME_SLEEP ms. This ensures secure wdog timer triggers during this
+ * time. We explicitly do not use tftf_timer_sleep();
+ */
+ waitms(NS_TIME_SLEEP);
+ time2 = syscounter_read();
+
+ /* Lapsed time should be at least equal to sleep time */
+ time_lapsed = ((time2 - time1) * 1000) / timer_freq;
+
+ if (time_lapsed < NS_TIME_SLEEP) {
+ ERROR("Time elapsed less than expected value: %llu vs %u\n",
+ time_lapsed, NS_TIME_SLEEP);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure Trusted Watchdog timer interrupt was serviced*/
+ if (cactus_get_response(ret_values) != IRQ_TWDOG_INTID) {
+ ERROR("Trusted watchdog timer interrupt not serviced by SP\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Disable Trusted Watchdog interrupt. */
+ if (!disable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test secure interrupt handling while first Secure Partition is
+ * in BLOCKED state.
+ *
+ * 1. Send a direct message request command to first Cactus SP to start the
+ * trusted watchdog timer.
+ *
+ * 2. Send a direct request to first SP to forward sleep command to second SP.
+ *
+ * 3. While second SP is running the busy loop, Secure interrupt should trigger
+ * during this time.
+ *
+ * 4. The interrupt will be trapped to SPM as IRQ. SPM will inject the virtual
+ * IRQ to the first SP through vIRQ conduit and perform eret to resume
+ * execution in first SP.
+ *
+ * 5. Execution traps to irq handler of Cactus SP. It will handle the secure
+ * interrupt triggered by the trusted watchdog timer.
+ *
+ * 6. First SP performs EOI by calling interrupt deactivate ABI and invokes
+ * FFA_RUN to resume second SP in the busy loop.
+ *
+ * 7. Second SP will complete the busy sleep loop and send a direct response
+ * message with the elapsed time back to the first SP.
+ *
+ * 8. First SP checks for the elapsed time and sends a direct response with
+ * a SUCCESS value back to tftf.
+ *
+ * 9. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 10. Further, TFTF expects SP to return the ID of Trusted Watchdog timer
+ * interrupt through a direct response message.
+ *
+ * 11. Test finishes successfully once the TFTF disables the trusted watchdog
+ * interrupt through a direct message request command.
+ */
+test_result_t test_ffa_sec_interrupt_sp_blocked(void)
+{
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Enable trusted watchdog interrupt as IRQ in the secure side. */
+ if (!enable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_send_twdog_cmd(SENDER, RECEIVER, 100);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for starting TWDOG timer\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send request to first Cactus SP to send request to Second Cactus
+ * SP to sleep
+ */
+ ret_values = cactus_fwd_sleep_cmd(SENDER, RECEIVER, RECEIVER_2,
+ SP_SLEEP_TIME, false);
+
+ /*
+ * Secure interrupt should trigger during this time, Cactus
+ * will handle the trusted watchdog timer.
+ */
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure Trusted Watchdog timer interrupt was serviced*/
+ if (cactus_get_response(ret_values) != IRQ_TWDOG_INTID) {
+ ERROR("Trusted watchdog timer interrupt not serviced by SP\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Disable Trusted Watchdog interrupt. */
+ if (!disable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test secure interrupt handling while first Secure Partition is
+ * in WAITING state while the second Secure Partition is running.
+ *
+ * 1. Send a direct message request command to first Cactus SP to start the
+ * trusted watchdog timer.
+ *
+ * 2. Send a direct request to second SP to sleep by executing a busy loop.
+ *
+ * 3. While second SP is running the busy loop, Secure interrupt should trigger
+ * during this time.
+ *
+ * 4. The interrupt is trapped to the SPM as a physical IRQ. The SPM injects a
+ * virtual IRQ to the first SP and resumes it while it is in waiting state.
+ *
+ * 5. Execution traps to irq handler of the first Cactus SP. It will handle the
+ * secure interrupt triggered by the trusted watchdog timer.
+ *
+ * 6. Cactus SP will perform End-Of-Interrupt by calling the interrupt
+ * deactivate HVC and invoke FFA_MSG_WAIT ABI to perform interrupt signal
+ * completion.
+ *
+ * 7. SPM then resumes the second SP which was preempted by secure interrupt.
+ *
+ * 8. Second SP will complete the busy sleep loop and send a direct response
+ * message with the elapsed time back to the first SP.
+ *
+ * 9. We make sure the time elapsed in the sleep routine by SP is not less than
+ * the requested value.
+ *
+ * 10. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 11. Further, TFTF expects SP to return the ID of Trusted Watchdog timer
+ * interrupt through a direct response message.
+ *
+ * 12. Test finishes successfully once the TFTF disables the trusted watchdog
+ * interrupt through a direct message request command.
+ */
+test_result_t test_ffa_sec_interrupt_sp1_waiting_sp2_running(void)
+{
+	struct ffa_value ret_values;
+
+	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+	/* Enable trusted watchdog interrupt as IRQ in the secure side. */
+	if (!enable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	ret_values = cactus_send_twdog_cmd(SENDER, RECEIVER, 100);
+
+	if (!is_ffa_direct_response(ret_values)) {
+		ERROR("Expected a direct response for starting TWDOG timer\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Send request to Second Cactus SP to sleep. */
+	ret_values = cactus_sleep_cmd(SENDER, RECEIVER_2, SP_SLEEP_TIME);
+
+	/*
+	 * Secure interrupt should trigger during this time, Cactus
+	 * will handle the trusted watchdog timer.
+	 */
+	if (!is_ffa_direct_response(ret_values)) {
+		ERROR("Expected a direct response for sleep command\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Fail if elapsed time is less than sleep time, as sibling tests do. */
+	if (cactus_get_response(ret_values) < SP_SLEEP_TIME) {
+		ERROR("Lapsed time less than requested sleep time\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Check for the last serviced secure virtual interrupt. */
+	ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+	if (!is_ffa_direct_response(ret_values)) {
+		ERROR("Expected a direct response for last serviced interrupt"
+			" command\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Make sure Trusted Watchdog timer interrupt was serviced*/
+	if (cactus_get_response(ret_values) != IRQ_TWDOG_INTID) {
+		ERROR("Trusted watchdog timer interrupt not serviced by SP\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Disable Trusted Watchdog interrupt. */
+	if (!disable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test handling of interrupt belonging to the extended SPI range
+ * while first Secure Partition is in RUNNING state.
+ *
+ * 1. Send a direct message request command to first Cactus SP to trigger the
+ * eSPI interrupt.
+ *
+ * 2. The Cactus SP either successfully handles the interrupt or fails to do
+ * so. It sends a value through direct message response indicating if the
+ * interrupt was handled.
+ *
+ * 3. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 4. Further, TFTF expects SP to return the appropriate interrupt id through a
+ * direct response message.
+ */
+test_result_t test_ffa_espi_sec_interrupt(void)
+{
+ struct ffa_value ret_values;
+
+ /* Check if extended SPI range is implemented by GIC. */
+ if (!arm_gic_is_espi_supported()) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Enable ESPI. */
+ ret_values = cactus_interrupt_cmd(SENDER, RECEIVER, IRQ_ESPI_TEST_INTID,
+ true, INTERRUPT_TYPE_IRQ);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response message while configuring"
+ " interrupt ESPI %u\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
+ ERROR("Failed to configure ESPI interrupt\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Trigger ESPI while running. */
+ ret_values = cactus_trigger_espi_cmd(SENDER, RECEIVER, IRQ_ESPI_TEST_INTID);
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response message while triggering"
+ " interrupt ESPI %u\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) != 1) {
+ ERROR("Interrupt %u not serviced by SP\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Disable ESPI. */
+ ret_values = cactus_interrupt_cmd(SENDER, RECEIVER, IRQ_ESPI_TEST_INTID,
+ false, INTERRUPT_TYPE_IRQ);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response message while configuring"
+ " interrupt ESPI %u\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
+ ERROR("Failed to configure ESPI interrupt %u\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+	/* Make sure the eSPI test interrupt was serviced. */
+ if (cactus_get_response(ret_values) != IRQ_ESPI_TEST_INTID) {
+ ERROR("ESPI interrupt %u not serviced by SP\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c b/tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c
new file mode 100644
index 000000000..80a3015a7
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <ffa_svc.h>
+#include <spm_common.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_defs.h>
+
+static bool should_skip_version_test;
+
+static struct mailbox_buffers mb;
+
+static const struct ffa_uuid sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}, {IVY_UUID}
+ };
+
+static const struct ffa_partition_info ffa_expected_partition_info[] = {
+ /* Primary partition info */
+ {
+ .id = SP_ID(1),
+ .exec_context = PRIMARY_EXEC_CTX_COUNT,
+ .properties = FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_NOTIFICATION,
+ .uuid = {PRIMARY_UUID}
+ },
+ /* Secondary partition info */
+ {
+ .id = SP_ID(2),
+ .exec_context = SECONDARY_EXEC_CTX_COUNT,
+ .properties = FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_NOTIFICATION,
+ .uuid = {SECONDARY_UUID}
+ },
+ /* Tertiary partition info */
+ {
+ .id = SP_ID(3),
+ .exec_context = TERTIARY_EXEC_CTX_COUNT,
+ .properties = FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_NOTIFICATION,
+ .uuid = {TERTIARY_UUID}
+ },
+ /* Ivy partition info */
+ {
+ .id = SP_ID(4),
+ .exec_context = IVY_EXEC_CTX_COUNT,
+ .properties = FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV,
+ .uuid = {IVY_UUID}
+ }
+};
+
+/*
+ * Using FFA version expected for SPM.
+ */
+#define SPM_VERSION MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR)
+
+/******************************************************************************
+ * FF-A Features ABI Tests
+ ******************************************************************************/
+
+test_result_t test_ffa_features(void)
+{
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
+
+ /* Check if SPMC is OP-TEE at S-EL1 */
+ if (check_spmc_execution_level()) {
+ /* FFA_FEATURES is not yet supported in OP-TEE */
+ return TEST_RESULT_SUCCESS;
+ }
+
+ struct ffa_value ffa_ret;
+ unsigned int expected_ret;
+ const struct ffa_features_test *ffa_feature_test_target;
+ unsigned int i, test_target_size =
+ get_ffa_feature_test_target(&ffa_feature_test_target);
+ struct ffa_features_test test_target;
+
+ for (i = 0U; i < test_target_size; i++) {
+ test_target = ffa_feature_test_target[i];
+ ffa_ret = ffa_features_with_input_property(test_target.feature, test_target.param);
+ expected_ret = FFA_VERSION_COMPILED
+ >= test_target.version_added ?
+ test_target.expected_ret : FFA_ERROR;
+ if (ffa_func_id(ffa_ret) != expected_ret) {
+ tftf_testcase_printf("%s returned %x, expected %x\n",
+ test_target.test_name,
+ ffa_func_id(ffa_ret),
+ expected_ret);
+ return TEST_RESULT_FAIL;
+ }
+ if ((expected_ret == FFA_ERROR) &&
+ (ffa_error_code(ffa_ret) != FFA_ERROR_NOT_SUPPORTED)) {
+ tftf_testcase_printf("%s failed for the wrong reason: "
+ "returned %x, expected %x\n",
+ test_target.test_name,
+ ffa_error_code(ffa_ret),
+ FFA_ERROR_NOT_SUPPORTED);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/******************************************************************************
+ * FF-A Version ABI Tests
+ ******************************************************************************/
+
+/*
+ * Calls FFA Version ABI, and checks if the result as expected.
+ */
+static test_result_t test_ffa_version(uint32_t input_version,
+ uint32_t expected_return)
+{
+ if (should_skip_version_test) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ struct ffa_value ret_values = ffa_version(input_version);
+
+ uint32_t spm_version = (uint32_t)(0xFFFFFFFF & ret_values.fid);
+
+ if (spm_version == expected_return) {
+ return TEST_RESULT_SUCCESS;
+ }
+
+ tftf_testcase_printf("Input Version: 0x%x\n"
+ "Return: 0x%x\nExpected: 0x%x\n",
+ input_version, spm_version, expected_return);
+
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * @Test_Aim@ Validate what happens when using same version as SPM.
+ */
+test_result_t test_ffa_version_equal(void)
+{
+ /*
+ * FFA_VERSION interface is used to check that SPM functionality is
+ * supported. On FFA_VERSION invocation from TFTF, the SPMD returns
+ * either NOT_SUPPORTED or the SPMC version value provided in the SPMC
+ * manifest. The variable "should_skip_test" is set to true when the
+ * SPMD returns NOT_SUPPORTED or a mismatched version, which means that
+ * a TFTF physical FF-A endpoint version (SPM_VERSION) does not match
+ * the SPMC's physical FF-A endpoint version. This prevents running the
+ * subsequent FF-A version tests (and break the test flow), as they're
+ * not relevant when the SPMD is not present within BL31
+ * (FFA_VERSION returns NOT_SUPPORTED).
+ */
+ test_result_t ret = test_ffa_version(SPM_VERSION, SPM_VERSION);
+
+ if (ret != TEST_RESULT_SUCCESS) {
+ should_skip_version_test = true;
+ ret = TEST_RESULT_SKIPPED;
+ }
+ return ret;
+}
+
+/*
+ * @Test_Aim@ Validate what happens when setting bit 31 in
+ * 'input_version'. As per spec, FFA version is 31 bits long.
+ * Bit 31 set is an invalid input.
+ */
+test_result_t test_ffa_version_bit31(void)
+{
+ return test_ffa_version(FFA_VERSION_BIT31_MASK | SPM_VERSION,
+ FFA_ERROR_NOT_SUPPORTED);
+}
+
+/*
+ * @Test_Aim@ Validate what happens for bigger version than SPM's.
+ */
+test_result_t test_ffa_version_bigger(void)
+{
+ return test_ffa_version(MAKE_FFA_VERSION(FFA_VERSION_MAJOR + 1, 0),
+ FFA_ERROR_NOT_SUPPORTED);
+}
+
+/*
+ * @Test_Aim@ Validate what happens for smaller version than SPM's.
+ */
+test_result_t test_ffa_version_smaller(void)
+{
+ return test_ffa_version(MAKE_FFA_VERSION(0, 9),
+ FFA_ERROR_NOT_SUPPORTED);
+}
+
+/******************************************************************************
+ * FF-A RXTX ABI Tests
+ ******************************************************************************/
+
+static test_result_t test_ffa_rxtx_map(uint32_t expected_return)
+{
+ struct ffa_value ret;
+
+ /**********************************************************************
+ * Verify that FFA is there and that it has the correct version.
+ **********************************************************************/
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
+
+ /**********************************************************************
+ * If OP-TEE is SPMC skip this test.
+ **********************************************************************/
+ if (check_spmc_execution_level()) {
+ VERBOSE("OP-TEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /*
+ * Declare RXTX buffers, assign them to the mailbox and call
+ * FFA_RXTX_MAP.
+ */
+ CONFIGURE_AND_MAP_MAILBOX(mb, PAGE_SIZE, ret);
+ if (ffa_func_id(ret) != expected_return) {
+ ERROR("Failed to map RXTX buffers %x!\n", ffa_error_code(ret));
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test mapping RXTX buffers from NWd.
+ */
+test_result_t test_ffa_rxtx_map_success(void)
+{
+ return test_ffa_rxtx_map(FFA_SUCCESS_SMC32);
+}
+
+/**
+ * Test to verify that 2nd call to FFA_RXTX_MAP should fail.
+ */
+test_result_t test_ffa_rxtx_map_fail(void)
+{
+ VERBOSE("This test expects error log.\n");
+ return test_ffa_rxtx_map(FFA_ERROR);
+}
+
+static test_result_t test_ffa_rxtx_unmap(uint32_t expected_return)
+{
+ struct ffa_value ret;
+
+ /**********************************************************************
+ * Verify that FFA is there and that it has the correct version.
+ **********************************************************************/
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
+
+ /**********************************************************************
+ * If OP-TEE is SPMC skip this test.
+ **********************************************************************/
+ if (check_spmc_execution_level()) {
+ VERBOSE("OP-TEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ ret = ffa_rxtx_unmap();
+ if (!is_expected_ffa_return(ret, expected_return)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test unmapping RXTX buffers from NWd.
+ */
+test_result_t test_ffa_rxtx_unmap_success(void)
+{
+ return test_ffa_rxtx_unmap(FFA_SUCCESS_SMC32);
+}
+
+/**
+ * Test to verify that 2nd call to FFA_RXTX_UNMAP should fail.
+ */
+test_result_t test_ffa_rxtx_unmap_fail(void)
+{
+ VERBOSE("This test expects error log.\n");
+ return test_ffa_rxtx_unmap(FFA_ERROR);
+}
+
+/**
+ * Test mapping RXTX buffers that have been previously unmapped from NWd.
+ */
+test_result_t test_ffa_rxtx_map_unmapped_success(void)
+{
+ test_result_t ret = test_ffa_rxtx_map(FFA_SUCCESS_SMC32);
+ /*
+ * Unmapping buffers such that further tests can map and use RXTX
+ * buffers.
+ * Subsequent attempts to map the RXTX buffers will fail, if this is
+ * invoked at this point.
+ */
+ ffa_rxtx_unmap();
+ return ret;
+}
+
+/*
+ * The FFA_RXTX_UNMAP specification at the NS physical FF-A instance allows for
+ * an ID to be given to the SPMC. The ID should relate to a VM that had its ID
+ * previously forwarded to the SPMC.
+ * This test validates that calls to FFA_RXTX_UNMAP from the NS physical
+ * instance can't unmap RXTX buffer pair of an SP.
+ */
+test_result_t test_ffa_rxtx_unmap_fail_if_sp(void)
+{
+ struct ffa_value ret;
+ struct ffa_value args;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, sp_uuids);
+
+	/* Invoke FFA_RXTX_UNMAP, providing the ID of an SP in w1. */
+ args = (struct ffa_value) {
+ .fid = FFA_RXTX_UNMAP,
+ .arg1 = SP_ID(1) << 16,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ ret = ffa_service_call(&args);
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_INVALID_PARAMETER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/******************************************************************************
+ * FF-A SPM_ID_GET ABI Tests
+ ******************************************************************************/
+
+test_result_t test_ffa_spm_id_get(void)
+{
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ struct ffa_value ffa_ret = ffa_spm_id_get();
+
+ if (is_ffa_call_error(ffa_ret)) {
+ ERROR("FFA_SPM_ID_GET call failed! Error code: 0x%x\n",
+ ffa_error_code(ffa_ret));
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check the SPMC value given in the fvp_spmc_manifest is returned */
+ ffa_id_t spm_id = ffa_endpoint_id(ffa_ret);
+
+ if (spm_id != SPMC_ID) {
+		ERROR("Expected SPMC_ID of 0x%x, received: 0x%x\n",
+ SPMC_ID, spm_id);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/******************************************************************************
+ * FF-A PARTITION_INFO_GET ABI Tests
+ ******************************************************************************/
+
+/**
+ * Attempt to get the SP partition information for individual partitions as well
+ * as all secure partitions.
+ */
+test_result_t test_ffa_partition_info(void)
+{
+ /***********************************************************************
+ * Check if SPMC has ffa_version and expected FFA endpoints are deployed.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 1, sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ if (!ffa_partition_info_helper(&mb, sp_uuids[0],
+ &ffa_expected_partition_info[0], 1)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!ffa_partition_info_helper(&mb, sp_uuids[1],
+ &ffa_expected_partition_info[1], 1)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!ffa_partition_info_helper(&mb, sp_uuids[2],
+ &ffa_expected_partition_info[2], 1)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!ffa_partition_info_helper(&mb, NULL_UUID,
+ ffa_expected_partition_info,
+ ARRAY_SIZE(ffa_expected_partition_info))) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Attempt to get v1.0 partition info descriptors.
+ */
+test_result_t test_ffa_partition_info_v1_0(void)
+{
+ /**************************************************************
+ * Check if SPMC has ffa_version and expected FFA endpoints
+ * are deployed.
+ *************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 0, sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ test_result_t result = TEST_RESULT_SUCCESS;
+ struct ffa_value ret = ffa_partition_info_get(NULL_UUID);
+ uint64_t expected_size = ARRAY_SIZE(ffa_expected_partition_info);
+
+ if (ffa_func_id(ret) == FFA_SUCCESS_SMC32) {
+ if (ffa_partition_info_count(ret) != expected_size) {
+ ERROR("Unexpected number of partitions %d\n",
+ ffa_partition_info_count(ret));
+ return TEST_RESULT_FAIL;
+ }
+ if (ffa_partition_info_desc_size(ret) !=
+ sizeof(struct ffa_partition_info_v1_0)) {
+			ERROR("Unexpected partition info descriptor size %d\n",
+ ffa_partition_info_desc_size(ret));
+ return TEST_RESULT_FAIL;
+ }
+ const struct ffa_partition_info_v1_0 *info =
+ (const struct ffa_partition_info_v1_0 *)(mb.recv);
+
+ for (unsigned int i = 0U; i < expected_size; i++) {
+ uint32_t expected_properties_v1_0 =
+ ffa_expected_partition_info[i].properties &
+ ~FFA_PARTITION_v1_0_RES_MASK;
+
+ if (info[i].id != ffa_expected_partition_info[i].id) {
+ ERROR("Wrong ID. Expected %x, got %x\n",
+ ffa_expected_partition_info[i].id,
+ info[i].id);
+ result = TEST_RESULT_FAIL;
+ }
+ if (info[i].exec_context !=
+ ffa_expected_partition_info[i].exec_context) {
+ ERROR("Wrong context. Expected %d, got %d\n",
+ ffa_expected_partition_info[i].exec_context,
+ info[i].exec_context);
+ result = TEST_RESULT_FAIL;
+ }
+ if (info[i].properties !=
+ expected_properties_v1_0) {
+ ERROR("Wrong properties. Expected %d, got %d\n",
+ expected_properties_v1_0,
+ info[i].properties);
+ result = TEST_RESULT_FAIL;
+ }
+ }
+ }
+
+ ret = ffa_rx_release();
+ if (is_ffa_call_error(ret)) {
+ ERROR("Failed to release RX buffer\n");
+ result = TEST_RESULT_FAIL;
+ }
+ return result;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_smccc.c b/tftf/tests/runtime_services/secure_service/test_ffa_smccc.c
new file mode 100644
index 000000000..15ca712a0
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_smccc.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <ffa_svc.h>
+#include <runtime_services/spm_test_helpers.h>
+#include <spm_common.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_defs.h>
+
+#define expect_eq(expr, value) \
+ do { \
+ if ((expr) != (value)) { \
+ ERROR("expect failed %s:%u\n", __FILE__, __LINE__); \
+ return TEST_RESULT_FAIL; \
+ } \
+	} while (0)
+
+static const struct ffa_uuid sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}, {IVY_UUID}
+ };
+
+struct ffa_value8 {
+ u_register_t fid;
+ u_register_t arg1;
+ u_register_t arg2;
+ u_register_t arg3;
+ u_register_t arg4;
+ u_register_t arg5;
+ u_register_t arg6;
+ u_register_t arg7;
+};
+
+/* Declared in test_ffa_smccc_asm.S. */
+uint32_t test_ffa_smc(struct ffa_value8 *);
+uint32_t test_ffa_smc_ext(struct ffa_value *);
+
+/**
+ * FF-A service calls are emitted at the NS physical FF-A instance.
+ * Such services do not return results in registers beyond x7.
+ * Check callee(s) preserves GP registers beyond x7 per SMCCCv1.2.
+ */
+test_result_t test_smccc_callee_preserved(void)
+{
+ struct ffa_value8 args;
+ struct mailbox_buffers mb;
+
+ /*
+ * Permit running the test on configurations running
+ * the S-EL2 SPMC where 4 test partitions are deployed.
+ */
+ CHECK_SPMC_TESTING_SETUP(1, 1, sp_uuids);
+ reset_tftf_mailbox();
+
+ /* Declare RX/TX buffers locally to the test. */
+ CONFIGURE_MAILBOX(mb, PAGE_SIZE);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_VERSION;
+ args.arg1 = 0x10001;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_VERSION_COMPILED);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, 0);
+ expect_eq(args.arg3, 0);
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_ID_GET;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_SUCCESS_SMC32);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, 0);
+ expect_eq(args.arg3, 0);
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_RXTX_MAP_SMC64;
+ args.arg1 = (uintptr_t)mb.send;
+ args.arg2 = (uintptr_t)mb.recv;
+ args.arg3 = 1;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_SUCCESS_SMC32);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, 0);
+ expect_eq(args.arg3, 0);
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_PARTITION_INFO_GET;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_SUCCESS_SMC32);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, ARRAY_SIZE(sp_uuids));
+ expect_eq(args.arg3, sizeof(struct ffa_partition_info));
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_RX_RELEASE;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_SUCCESS_SMC32);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, 0);
+ expect_eq(args.arg3, 0);
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_RXTX_UNMAP;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_SUCCESS_SMC32);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, 0);
+ expect_eq(args.arg3, 0);
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * An FF-A service call is emitted at the NS physical FF-A instance.
+ * The service returns results in x0-x17 registers.
+ * Check callee(s) preserve GP registers beyond x17 per SMCCCv1.2.
+ */
+test_result_t test_smccc_ext_callee_preserved(void)
+{
+ struct ffa_value args_ext;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, sp_uuids);
+
+ /* Test the SMCCC extended registers range. */
+ memset(&args_ext, 0, sizeof(struct ffa_value));
+ args_ext.fid = FFA_PARTITION_INFO_GET_REGS_SMC64;
+ expect_eq(test_ffa_smc_ext(&args_ext), 0);
+ expect_eq(args_ext.fid, FFA_SUCCESS_SMC64);
+ expect_eq(args_ext.arg1, 0);
+ expect_eq(args_ext.arg2 >> 48, sizeof(struct ffa_partition_info));
+ expect_eq(args_ext.arg2 & 0xffff, ARRAY_SIZE(sp_uuids) - 1);
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_smccc_asm.S b/tftf/tests/runtime_services/secure_service/test_ffa_smccc_asm.S
new file mode 100644
index 000000000..00d82eeb3
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_smccc_asm.S
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .global test_ffa_smc
+ .global test_ffa_smc_ext
+
+ .section .text, "ax"
+
+/**
+ * test_ffa_smc
+ *
+ * x0 - ptr to a struct ffa_value8
+ *
+ * This function is used to test FF-A ABIs on top of SMCCCv1.2 for interfaces
+ * requiring at most 8 input/output registers.
+ * Load 8 GP input registers, move a pattern into x8-x29 and emit an SMC.
+ * On return save 8 output registers to the structure and compare x8-x29
+ * to the known pattern. If a register was altered it indicates an SMCCC
+ * violation and the function returns with a value greater than 0.
+ * The function returns 0 on success.
+ */
+func test_ffa_smc
+ /* Save FP/LR. */
+ stp x29, x30, [sp, #-16]!
+
+ /* Save x19-x28 per AAPCS64. */
+ stp x27, x28, [sp, #-16]!
+ stp x25, x26, [sp, #-16]!
+ stp x23, x24, [sp, #-16]!
+ stp x21, x22, [sp, #-16]!
+ stp x19, x20, [sp, #-16]!
+
+ /*
+ * Store the struct ffa_value pointer to x30.
+ * x30 is preserved by the SMC callee.
+ */
+ mov x30, x0
+
+ /* Load the SMC service input values. */
+ ldp x0, x1, [x30]
+ ldp x2, x3, [x30, #16]
+ ldp x4, x5, [x30, #32]
+ ldp x6, x7, [x30, #48]
+ mov x8, #0xa8
+ add x9, x8, #1
+ add x10, x8, #2
+ add x11, x8, #3
+ add x12, x8, #4
+ add x13, x8, #5
+ add x14, x8, #6
+ add x15, x8, #7
+ add x16, x8, #8
+ add x17, x8, #9
+ add x18, x8, #10
+ add x19, x8, #11
+ add x20, x8, #12
+ add x21, x8, #13
+ add x22, x8, #14
+ add x23, x8, #15
+ add x24, x8, #16
+ add x25, x8, #17
+ add x26, x8, #18
+ add x27, x8, #19
+ add x28, x8, #20
+ add x29, x8, #21
+ smc #0
+
+ /* Store the SMC service return values. */
+ stp x0, x1, [x30]
+ stp x2, x3, [x30, #16]
+ stp x4, x5, [x30, #32]
+ stp x6, x7, [x30, #48]
+
+ /* Check if SMC callee-preserved registers were altered. */
+ mov w0, wzr
+ cmp x8, #0xa8
+ cinc x0, x0, ne
+ cmp x9, #0xa9
+ cinc x0, x0, ne
+ cmp x10, #0xaa
+ cinc x0, x0, ne
+ cmp x11, #0xab
+ cinc x0, x0, ne
+ cmp x12, #0xac
+ cinc x0, x0, ne
+ cmp x13, #0xad
+ cinc x0, x0, ne
+ cmp x14, #0xae
+ cinc x0, x0, ne
+ cmp x15, #0xaf
+ cinc x0, x0, ne
+ cmp x16, #0xb0
+ cinc x0, x0, ne
+ cmp x17, #0xb1
+ cinc x0, x0, ne
+ cmp x18, #0xb2
+ cinc x0, x0, ne
+ cmp x19, #0xb3
+ cinc x0, x0, ne
+ cmp x20, #0xb4
+ cinc x0, x0, ne
+ cmp x21, #0xb5
+ cinc x0, x0, ne
+ cmp x22, #0xb6
+ cinc x0, x0, ne
+ cmp x23, #0xb7
+ cinc x0, x0, ne
+ cmp x24, #0xb8
+ cinc x0, x0, ne
+ cmp x25, #0xb9
+ cinc x0, x0, ne
+ cmp x26, #0xba
+ cinc x0, x0, ne
+ cmp x27, #0xbb
+ cinc x0, x0, ne
+ cmp x28, #0xbc
+ cinc x0, x0, ne
+ cmp x29, #0xbd
+ cinc x0, x0, ne
+
+ /* Restore x19-x28 per AAPCS64. */
+ ldp x19, x20, [sp], #16
+ ldp x21, x22, [sp], #16
+ ldp x23, x24, [sp], #16
+ ldp x25, x26, [sp], #16
+ ldp x27, x28, [sp], #16
+
+ /* Restore FP/LR. */
+ ldp x29, x30, [sp], #16
+ ret
+endfunc test_ffa_smc
+
+/**
+ * test_ffa_smc_ext
+ *
+ * x0 - ptr to a struct ffa_value
+ *
+ * This function is used to test FF-A ABIs on top of SMCCCv1.2 for interfaces
+ * requiring at most 18 input/output registers.
+ * Load 18 GP input registers, move a pattern into x18-x29 and emit an SMC.
+ * On return save 18 output registers to the structure and compare x18-x29
+ * to the known pattern. If a register was altered it indicates an SMCCC
+ * violation and the function returns with a value greater than 0.
+ * The function returns 0 on success.
+ */
+func test_ffa_smc_ext
+ /* Save FP/LR. */
+ stp x29, x30, [sp, #-16]!
+
+ /* Save x19-x28 per AAPCS64. */
+ stp x27, x28, [sp, #-16]!
+ stp x25, x26, [sp, #-16]!
+ stp x23, x24, [sp, #-16]!
+ stp x21, x22, [sp, #-16]!
+ stp x19, x20, [sp, #-16]!
+
+ /*
+ * Store the struct ffa_value_ext pointer to x30.
+ * x30 is preserved by the SMC callee.
+ */
+ mov x30, x0
+
+ /* Load the SMC service input values. */
+ ldp x0, x1, [x30]
+ ldp x2, x3, [x30, #16]
+ ldp x4, x5, [x30, #32]
+ ldp x6, x7, [x30, #48]
+ ldp x8, x9, [x30, #64]
+ ldp x10, x11, [x30, #80]
+ ldp x12, x13, [x30, #96]
+ ldp x14, x15, [x30, #112]
+ ldp x16, x17, [x30, #128]
+ mov x18, #0xb2
+ add x19, x18, #1
+ add x20, x18, #2
+ add x21, x18, #3
+ add x22, x18, #4
+ add x23, x18, #5
+ add x24, x18, #6
+ add x25, x18, #7
+ add x26, x18, #8
+ add x27, x18, #9
+ add x28, x18, #10
+ add x29, x18, #11
+ smc #0
+
+ /* Store the SMC service return values. */
+ stp x0, x1, [x30]
+ stp x2, x3, [x30, #16]
+ stp x4, x5, [x30, #32]
+ stp x6, x7, [x30, #48]
+ stp x8, x9, [x30, #64]
+ stp x10, x11, [x30, #80]
+ stp x12, x13, [x30, #96]
+ stp x14, x15, [x30, #112]
+ stp x16, x17, [x30, #128]
+
+ /* Check if SMC callee-preserved registers were altered. */
+ mov w0, wzr
+ cmp x18, #0xb2
+ cinc x0, x0, ne
+ cmp x19, #0xb3
+ cinc x0, x0, ne
+ cmp x20, #0xb4
+ cinc x0, x0, ne
+ cmp x21, #0xb5
+ cinc x0, x0, ne
+ cmp x22, #0xb6
+ cinc x0, x0, ne
+ cmp x23, #0xb7
+ cinc x0, x0, ne
+ cmp x24, #0xb8
+ cinc x0, x0, ne
+ cmp x25, #0xb9
+ cinc x0, x0, ne
+ cmp x26, #0xba
+ cinc x0, x0, ne
+ cmp x27, #0xbb
+ cinc x0, x0, ne
+ cmp x28, #0xbc
+ cinc x0, x0, ne
+ cmp x29, #0xbd
+ cinc x0, x0, ne
+
+ /* Restore x19-x28 per AAPCS64. */
+ ldp x19, x20, [sp], #16
+ ldp x21, x22, [sp], #16
+ ldp x23, x24, [sp], #16
+ ldp x25, x26, [sp], #16
+ ldp x27, x28, [sp], #16
+
+ /* Restore FP/LR. */
+ ldp x29, x30, [sp], #16
+ ret
+endfunc test_ffa_smc_ext
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_version.c b/tftf/tests/runtime_services/secure_service/test_ffa_version.c
deleted file mode 100644
index 41eca5adc..000000000
--- a/tftf/tests/runtime_services/secure_service/test_ffa_version.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <ffa_svc.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-
-/*
- * Using FFA version expected for SPM.
- */
-#define SPM_VERSION MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR)
-
-static bool should_skip_test;
-
-/*
- * Calls FFA Version ABI, and checks if the result as expected.
- */
-static test_result_t test_ffa_version(uint32_t input_version, uint32_t expected_return)
-{
- if (should_skip_test) {
- return TEST_RESULT_SKIPPED;
- }
-
- smc_ret_values ret_values = ffa_version(input_version);
-
- uint32_t spm_version = (uint32_t)(0xFFFFFFFF & ret_values.ret0);
-
- if (spm_version == expected_return) {
- return TEST_RESULT_SUCCESS;
- }
-
- tftf_testcase_printf("Input Version: 0x%x\nReturn: 0x%x\nExpected: 0x%x\n",
- input_version, spm_version, expected_return);
-
- return TEST_RESULT_FAIL;
-}
-
-/*
- * @Test_Aim@ Validate what happens when using same version as SPM.
- */
-test_result_t test_ffa_version_equal(void)
-{
- /*
- * FFA_VERSION interface is used to check that SPM functionality is supported.
- * On FFA_VERSION invocation from TFTF, the SPMD returns either NOT_SUPPORTED or
- * the SPMC version value provided in the SPMC manifest. The variable "should_skip_test"
- * is set to true when the SPMD returns NOT_SUPPORTED or a mismatched version, which
- * means that a TFTF physical FF-A endpoint version (SPM_VERSION) does not match the
- * SPMC's physical FF-A endpoint version. This prevents running the subsequent FF-A
- * version tests (and break the test flow), as they're not relevant when the SPMD is
- * not present within BL31 (FFA_VERSION returns NOT_SUPPORTED).
- */
- test_result_t ret = test_ffa_version(SPM_VERSION, SPM_VERSION);
- if (ret != TEST_RESULT_SUCCESS) {
- should_skip_test = true;
- ret = TEST_RESULT_SKIPPED;
- }
- return ret;
-}
-
-/*
- * @Test_Aim@ Validate what happens when setting bit 31 in
- * 'input_version'. As per spec, FFA version is 31 bits long.
- * Bit 31 set is an invalid input.
- */
-test_result_t test_ffa_version_bit31(void)
-{
- return test_ffa_version(FFA_VERSION_BIT31_MASK | SPM_VERSION, FFA_ERROR_NOT_SUPPORTED);
-}
-
-/*
- * @Test_Aim@ Validate what happens for bigger version than SPM's.
- */
-test_result_t test_ffa_version_bigger(void)
-{
- return test_ffa_version(MAKE_FFA_VERSION(FFA_VERSION_MAJOR + 1, 0), SPM_VERSION);
-}
-
-/*
- * @Test_Aim@ Validate what happens for smaller version than SPM's.
- */
-test_result_t test_ffa_version_smaller(void)
-{
- return test_ffa_version(MAKE_FFA_VERSION(0, 9), SPM_VERSION);
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_quark_request.c b/tftf/tests/runtime_services/secure_service/test_quark_request.c
deleted file mode 100644
index 0f9556fe2..000000000
--- a/tftf/tests/runtime_services/secure_service/test_quark_request.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <quark_def.h>
-#include <spci_helpers.h>
-#include <test_helpers.h>
-
-/*
- * @Test_Aim@ This tests opens a Secure Service handle and performs a simple
- * request to Quark to verify that its memory map is correct and that it is
- * working as expected.
- */
-test_result_t test_quark_request(void)
-{
- int ret;
- uint16_t handle_quark;
- u_register_t rx1, rx2, rx3;
- test_result_t result = TEST_RESULT_SUCCESS;
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- /* Open handles. */
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_quark,
- QUARK_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- return TEST_RESULT_FAIL;
- }
-
- /* Send request to Quark */
-
- ret = spci_service_request_blocking(QUARK_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_quark,
- &rx1, &rx2, &rx3);
-
- if (ret == SPCI_SUCCESS) {
- if (rx1 != QUARK_MAGIC_NUMBER) {
- tftf_testcase_printf("%d: Quark returned 0x%x 0x%lx 0x%lx 0x%lx\n",
- __LINE__, (uint32_t)ret, rx1, rx2, rx3);
- result = TEST_RESULT_FAIL;
- }
- } else {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Close handle */
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_quark);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
- return result;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spci_blocking_request.c b/tftf/tests/runtime_services/secure_service/test_spci_blocking_request.c
deleted file mode 100644
index 6d248428e..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spci_blocking_request.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <cactus_def.h>
-#include <debug.h>
-#include <events.h>
-#include <ivy_def.h>
-#include <plat_topology.h>
-#include <platform.h>
-#include <power_management.h>
-#include <smccc.h>
-#include <spci_helpers.h>
-#include <spci_svc.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-
-#define TEST_NUM_ITERATIONS 1000U
-
-static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
-
-static test_result_t test_spci_blocking_multicore_fn(void)
-{
- int ret;
- uint16_t handle_cactus, handle_ivy;
- u_register_t rx1, rx2, rx3;
- test_result_t result = TEST_RESULT_SUCCESS;
-
- u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int core_pos = platform_get_core_pos(cpu_mpid);
-
- tftf_send_event(&cpu_has_entered_test[core_pos]);
-
- /* Open handles. */
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_cactus,
- CACTUS_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- goto exit_none;
- }
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_ivy,
- IVY_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- goto exit_cactus;
- }
-
- /* Request services. */
-
- for (unsigned int i = 0U; i < TEST_NUM_ITERATIONS; i++) {
-
- /* Send request to Cactus */
-
- ret = spci_service_request_blocking(CACTUS_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- &rx1, &rx2, &rx3);
-
- if (ret == SPCI_BUSY) {
- /*
- * Another CPU is already using Cactus, this is not a
- * failure.
- */
- } else if (ret == SPCI_SUCCESS) {
- if (rx1 != CACTUS_MAGIC_NUMBER) {
- tftf_testcase_printf("%d: Cactus returned 0x%x 0x%lx 0x%lx 0x%lx\n",
- __LINE__, (uint32_t)ret, rx1, rx2, rx3);
- result = TEST_RESULT_FAIL;
- break;
- }
- } else {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- break;
- }
-
- /* Send request to Ivy */
-
- ret = spci_service_request_blocking(IVY_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_ivy,
- &rx1, &rx2, &rx3);
- if (ret == SPCI_BUSY) {
- /*
- * Another CPU is already using Ivy, this is not a
- * failure.
- */
- } else if (ret == SPCI_SUCCESS) {
- if (rx1 != IVY_MAGIC_NUMBER) {
- tftf_testcase_printf("%d: Ivy returned 0x%x 0x%lx 0x%lx 0x%lx\n",
- __LINE__, (uint32_t)ret, rx1, rx2, rx3);
- result = TEST_RESULT_FAIL;
- break;
- }
- } else {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- break;
- }
-
- }
-
- /* Close handles. */
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_ivy);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
-exit_cactus:
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* All tests finished. */
-exit_none:
- return result;
-}
-
-/*
- * @Test_Aim@ This tests opens a Secure Service handle and performs many simple
- * blocking requests to Cactus and Ivy from multiple cores
- */
-test_result_t test_spci_blocking_request_multicore(void)
-{
- unsigned int cpu_node, core_pos;
- int psci_ret;
- u_register_t cpu_mpid;
- u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
- tftf_init_event(&cpu_has_entered_test[i]);
- }
-
- /* Power on all CPUs */
- for_each_cpu(cpu_node) {
- cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
- /* Skip lead CPU as it is already powered on */
- if (cpu_mpid == lead_mpid) {
- continue;
- }
-
- psci_ret = tftf_cpu_on(cpu_mpid,
- (uintptr_t)test_spci_blocking_multicore_fn, 0);
- if (psci_ret != PSCI_E_SUCCESS) {
- core_pos = platform_get_core_pos(cpu_mpid);
-
- tftf_testcase_printf(
- "Failed to power on CPU %d (rc = %d)\n",
- core_pos, psci_ret);
- return TEST_RESULT_FAIL;
- }
- }
-
- /* Wait until all CPUs have started the test. */
- for_each_cpu(cpu_node) {
- cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
- /* Skip lead CPU */
- if (cpu_mpid == lead_mpid) {
- continue;
- }
-
- core_pos = platform_get_core_pos(cpu_mpid);
- tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
- }
-
- /* Enter the test on lead CPU and return the result. */
- return test_spci_blocking_multicore_fn();
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spci_blocking_while_busy.c b/tftf/tests/runtime_services/secure_service/test_spci_blocking_while_busy.c
deleted file mode 100644
index dffd910f8..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spci_blocking_while_busy.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <cactus_def.h>
-#include <debug.h>
-#include <smccc.h>
-#include <spci_helpers.h>
-#include <spci_svc.h>
-#include <string.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-#include <timer.h>
-
-static volatile int timer_irq_received;
-
-/*
- * ISR for the timer interrupt. Update a global variable to check it has been
- * called.
- */
-static int timer_handler(void *data)
-{
- assert(timer_irq_received == 0);
- timer_irq_received = 1;
- return 0;
-}
-
-/*
- * @Test_Aim@ Test that blocking requests can only be done when there are no
- * active non-blocking requests in a partition.
- *
- * 1. Register a handler for the non-secure timer interrupt. Program it to fire
- * in a certain time.
- *
- * 2. Send a non-blocking request to Cactus to sleep for more time than the
- * timer.
- *
- * 3. While servicing the timer sleep request, the non-secure timer should
- * fire and interrupt Cactus.
- *
- * 5. Check that the interrupt has been handled.
- *
- * 6. Make sure that the response isn't ready yet.
- *
- * 7. Try to send a blocking request. It should be denied because the partition
- * is busy.
- *
- * 8. Return to Cactus to finish the request.
- */
-test_result_t test_spci_blocking_while_busy(void)
-{
- int ret;
- u_register_t rx1, rx2, rx3;
- uint16_t handle_cactus;
- uint32_t token_cactus;
- test_result_t result = TEST_RESULT_SUCCESS;
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- /* Open handle. */
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_cactus,
- CACTUS_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /* Program timer */
-
- timer_irq_received = 0;
- tftf_timer_register_handler(timer_handler);
-
- ret = tftf_program_timer(100);
- if (ret < 0) {
- tftf_testcase_printf("Failed to program timer (%d)\n", ret);
- result = TEST_RESULT_FAIL;
- goto exit_close_handle;
- }
-
- enable_irq();
-
- /* Send request to Cactus */
-
- ret = spci_service_request_start(CACTUS_SLEEP_MS, 200U,
- 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- &token_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: %d\n",
- __LINE__, ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Check that the interrupt has been handled. */
-
- if (timer_irq_received == 0) {
- tftf_testcase_printf("%d: Didn't handle interrupt\n", __LINE__);
- result = TEST_RESULT_FAIL;
- }
-
- tftf_cancel_timer();
- tftf_timer_unregister_handler();
-
- /* Make sure that the response is not ready yet. */
-
- ret = spci_service_get_response(TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- token_cactus,
- NULL, NULL, NULL);
-
- if (ret == SPCI_SUCCESS) {
- tftf_testcase_printf("%d: Cactus returned SPCI_SUCCESS\n",
- __LINE__);
- result = TEST_RESULT_FAIL;
- goto exit_close_handle;
- }
-
- /*
- * Try to send a blocking request. It should be denied because the
- * partition is busy.
- */
-
- ret = spci_service_request_blocking(CACTUS_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- &rx1, &rx2, &rx3);
- if (ret != SPCI_BUSY) {
- tftf_testcase_printf("%d: Cactus should have returned SPCI_BUSY. Returned %d 0x%lx 0x%lx 0x%lx\n",
- __LINE__, ret, rx1, rx2, rx3);
- result = TEST_RESULT_FAIL;
- goto exit_close_handle;
- }
-
- /* Re-enter Cactus to finish the request */
-
- do {
- ret = spci_service_request_resume(TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- token_cactus,
- &rx1, NULL, NULL);
- } while (ret == SPCI_QUEUED);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: Cactus returned %d\n",
- __LINE__, ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Close handle. */
-exit_close_handle:
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: %d\n",
- __LINE__, ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* All tests finished. */
-
- return result;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spci_handle_open.c b/tftf/tests/runtime_services/secure_service/test_spci_handle_open.c
deleted file mode 100644
index 522465ee1..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spci_handle_open.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <cactus_def.h>
-#include <debug.h>
-#include <events.h>
-#include <plat_topology.h>
-#include <platform.h>
-#include <smccc.h>
-#include <spci_helpers.h>
-#include <spci_svc.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-
-/*
- * @Test_Aim@ This tests that we can get the handle of a Secure Service and
- * close it correctly.
- */
-test_result_t test_spci_handle_open(void)
-{
- int ret;
- uint16_t handle1, handle2;
-
- /**********************************************************************
- * Verify that SPCI is there and that it has the correct version.
- **********************************************************************/
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- /**********************************************************************
- * Try to get handle of an invalid Secure Service.
- **********************************************************************/
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle1,
- CACTUS_INVALID_UUID);
-
- if (ret != SPCI_NOT_PRESENT) {
- tftf_testcase_printf("%d: SPM should have returned SPCI_NOT_PRESENT. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /**********************************************************************
- * Get handle of valid Secure Services.
- **********************************************************************/
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle1,
- CACTUS_SERVICE1_UUID);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle2,
- CACTUS_SERVICE2_UUID);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /**********************************************************************
- * Close invalid handle.
- **********************************************************************/
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, ~handle1);
-
- if (ret != SPCI_INVALID_PARAMETER) {
- tftf_testcase_printf("%d: SPM didn't fail to close the handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /**********************************************************************
- * Close valid handles.
- **********************************************************************/
-
- /* Close in the reverse order to test that it can be done. */
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle2);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle1);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /**********************************************************************
- * All tests passed.
- **********************************************************************/
-
- return TEST_RESULT_SUCCESS;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_interrupt.c b/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_interrupt.c
deleted file mode 100644
index cb5dd6b0c..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_interrupt.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <cactus_def.h>
-#include <debug.h>
-#include <smccc.h>
-#include <spci_helpers.h>
-#include <spci_svc.h>
-#include <string.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-#include <timer.h>
-
-static volatile int timer_irq_received;
-
-/*
- * ISR for the timer interrupt. Update a global variable to check it has been
- * called.
- */
-static int timer_handler(void *data)
-{
- assert(timer_irq_received == 0);
- timer_irq_received = 1;
- return 0;
-}
-
-/*
- * @Test_Aim@ Test that non-secure interrupts interrupt non-blocking requests.
- *
- * 1. Register a handler for the non-secure timer interrupt. Program it to fire
- * in a certain time.
- *
- * 2. Send a non-blocking request to Cactus to sleep for more time than the
- * timer.
- *
- * 3. While servicing the timer sleep request, the non-secure timer should
- * fire and interrupt Cactus.
- *
- * 4. Make sure that the response isn't ready yet.
- *
- * 5. In the TFTF, check that the interrupt has been handled.
- *
- * 6. Return to Cactus to finish the request.
- */
-test_result_t test_spci_non_blocking_interrupt_by_ns(void)
-{
- int ret;
- u_register_t ret1;
- uint16_t handle_cactus;
- uint32_t token_cactus;
- test_result_t result = TEST_RESULT_SUCCESS;
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- /* Open handle */
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_cactus,
- CACTUS_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /* Program timer */
-
- timer_irq_received = 0;
- tftf_timer_register_handler(timer_handler);
-
- ret = tftf_program_timer(100);
- if (ret < 0) {
- tftf_testcase_printf("Failed to program timer (%d)\n", ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Send request to Cactus */
-
- ret = spci_service_request_start(CACTUS_SLEEP_MS, 200U,
- 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- &token_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: %d\n",
- __LINE__, ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Check that the interrupt has been handled */
-
- tftf_cancel_timer();
- tftf_timer_unregister_handler();
-
- if (timer_irq_received == 0) {
- tftf_testcase_printf("%d: Didn't handle interrupt\n", __LINE__);
- result = TEST_RESULT_FAIL;
- }
-
- /* Make sure that the response is not ready yet */
-
- ret = spci_service_get_response(TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- token_cactus,
- NULL, NULL, NULL);
-
- if (ret == SPCI_SUCCESS) {
- tftf_testcase_printf("%d: Cactus returned SPCI_SUCCESS\n",
- __LINE__);
- result = TEST_RESULT_FAIL;
- goto exit_close_handle;
- }
-
- /* Re-enter Cactus to finish the request */
-
- do {
- ret = spci_service_request_resume(TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- token_cactus,
- &ret1, NULL, NULL);
- } while (ret == SPCI_QUEUED);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: Cactus returned 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Close handle */
-exit_close_handle:
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: %d\n",
- __LINE__, ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* All tests finished */
-
- return result;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_request.c b/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_request.c
deleted file mode 100644
index 8d3506b55..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_request.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <cactus_def.h>
-#include <debug.h>
-#include <events.h>
-#include <ivy_def.h>
-#include <plat_topology.h>
-#include <platform.h>
-#include <power_management.h>
-#include <smccc.h>
-#include <stdbool.h>
-#include <spci_helpers.h>
-#include <spci_svc.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-
-#define TEST_NUM_ITERATIONS 1000U
-
-test_result_t test_spci_non_blocking_fn(void)
-{
- int ret;
- u_register_t ret1;
- uint16_t handle_cactus, handle_ivy;
- uint32_t token_cactus, token_ivy;
- test_result_t result = TEST_RESULT_SUCCESS;
-
- /* Open handles. */
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_cactus,
- CACTUS_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- goto exit_none;
- }
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_ivy,
- IVY_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- goto exit_cactus;
- }
-
- /* Request services. */
-
- for (unsigned int i = 0U; i < TEST_NUM_ITERATIONS; i++) {
-
- bool exit_loop = false;
-
- /* Send request to Cactus */
-
- ret = spci_service_request_start(CACTUS_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- &token_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- exit_loop = true;
- }
-
- /* Send request to Ivy */
-
- ret = spci_service_request_start(IVY_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_ivy,
- &token_ivy);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- exit_loop = true;
- }
-
- /* Get response from Ivy */
-
- do {
- ret = spci_service_request_resume(TFTF_SPCI_CLIENT_ID,
- handle_ivy,
- token_ivy,
- &ret1, NULL, NULL);
- } while (ret == SPCI_QUEUED);
-
- if ((ret != SPCI_SUCCESS) || (ret1 != IVY_MAGIC_NUMBER)) {
- tftf_testcase_printf("%d: Ivy returned 0x%x 0x%lx\n",
- __LINE__, (uint32_t)ret, ret1);
- result = TEST_RESULT_FAIL;
- exit_loop = true;
- }
-
- /* Get response from Cactus */
-
- do {
- ret = spci_service_request_resume(TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- token_cactus,
- &ret1, NULL, NULL);
- } while (ret == SPCI_QUEUED);
-
- if ((ret != SPCI_SUCCESS) || (ret1 != CACTUS_MAGIC_NUMBER)) {
- tftf_testcase_printf("%d: Cactus returned 0x%x 0x%lx\n",
- __LINE__, (uint32_t)ret, ret1);
- result = TEST_RESULT_FAIL;
- exit_loop = true;
- }
-
- /* If there has been an error, exit early. */
- if (exit_loop) {
- break;
- }
- }
-
- /* Close handles. */
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_ivy);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
-exit_cactus:
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* All tests passed. */
-exit_none:
- return result;
-}
-
-/*
- * @Test_Aim@ This tests opens a Secure Service handle and performs many simple
- * non-blocking requests to Cactus and Ivy.
- */
-test_result_t test_spci_request(void)
-{
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- return test_spci_non_blocking_fn();
-}
-
-/******************************************************************************/
-
-static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
-
-static test_result_t test_spci_non_blocking_multicore_fn(void)
-{
- u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int core_pos = platform_get_core_pos(cpu_mpid);
-
- tftf_send_event(&cpu_has_entered_test[core_pos]);
-
- return test_spci_non_blocking_fn();
-}
-
-/*
- * @Test_Aim@ This tests opens a Secure Service handle and performs many simple
- * non-blocking requests to Cactus and Ivy from multiple cores
- */
-test_result_t test_spci_request_multicore(void)
-{
- unsigned int cpu_node, core_pos;
- int psci_ret;
- u_register_t cpu_mpid;
- u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
- tftf_init_event(&cpu_has_entered_test[i]);
- }
-
- /* Power on all CPUs */
- for_each_cpu(cpu_node) {
- cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
- /* Skip lead CPU as it is already powered on */
- if (cpu_mpid == lead_mpid) {
- continue;
- }
-
- core_pos = platform_get_core_pos(cpu_mpid);
-
- psci_ret = tftf_cpu_on(cpu_mpid,
- (uintptr_t)test_spci_non_blocking_multicore_fn, 0);
- if (psci_ret != PSCI_E_SUCCESS) {
- tftf_testcase_printf(
- "Failed to power on CPU %d (rc = %d)\n",
- core_pos, psci_ret);
- return TEST_RESULT_FAIL;
- }
- }
-
- /* Wait until all CPUs have started the test. */
- for_each_cpu(cpu_node) {
- cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
- /* Skip lead CPU */
- if (cpu_mpid == lead_mpid) {
- continue;
- }
-
- core_pos = platform_get_core_pos(cpu_mpid);
- tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
- }
-
- /* Enter the test on lead CPU and return the result. */
- return test_spci_non_blocking_fn();
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spm_cpu_features.c b/tftf/tests/runtime_services/secure_service/test_spm_cpu_features.c
deleted file mode 100644
index f57fa243b..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spm_cpu_features.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <cactus_test_cmds.h>
-#include <ffa_endpoints.h>
-#include <ffa_helpers.h>
-#include <test_helpers.h>
-
-#define SENDER HYP_ID
-#define RECEIVER SP_ID(1)
-
-static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
-
-static test_result_t simd_vector_compare(simd_vector_t a[SIMD_NUM_VECTORS],
- simd_vector_t b[SIMD_NUM_VECTORS])
-{
- for (unsigned int num = 0U; num < SIMD_NUM_VECTORS; num++) {
- if (memcmp(a[num], b[num], sizeof(simd_vector_t)) != 0) {
- ERROR("Vectors not equal: a:0x%llx b:0x%llx\n",
- (uint64_t)a[num][0], (uint64_t)b[num][0]);
- return TEST_RESULT_FAIL;
- }
- }
- return TEST_RESULT_SUCCESS;
-}
-
-/*
- * Tests that SIMD vectors are preserved during the context switches between
- * normal world and the secure world.
- * Fills the SIMD vectors with known values, requests SP to fill the vectors
- * with a different values, checks that the context is restored on return.
- */
-test_result_t test_simd_vectors_preserved(void)
-{
- SKIP_TEST_IF_AARCH32();
-
- /**********************************************************************
- * Verify that FFA is there and that it has the correct version.
- **********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
-
- simd_vector_t simd_vectors_send[SIMD_NUM_VECTORS],
- simd_vectors_receive[SIMD_NUM_VECTORS];
-
- /* 0x11 is just a dummy value to be distinguished from the value in the
- * secure world. */
- for (unsigned int num = 0U; num < SIMD_NUM_VECTORS; num++) {
- memset(simd_vectors_send[num], 0x11 * num, sizeof(simd_vector_t));
- }
-
- fill_simd_vector_regs(simd_vectors_send);
-
- smc_ret_values ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
-
- if (!is_ffa_direct_response(ret)) {
- return TEST_RESULT_FAIL;
- }
-
- if (cactus_get_response(ret) == CACTUS_ERROR) {
- return TEST_RESULT_FAIL;
- }
-
- read_simd_vector_regs(simd_vectors_receive);
-
- return simd_vector_compare(simd_vectors_send, simd_vectors_receive);
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spm_simd.c b/tftf/tests/runtime_services/secure_service/test_spm_simd.c
new file mode 100644
index 000000000..cfc931f54
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_spm_simd.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <fpu.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+#include <lib/extensions/sve.h>
+
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+#define SVE_TEST_ITERATIONS 100
+#define NS_SVE_OP_ARRAYSIZE 1024
+
+static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
+
+static sve_z_regs_t sve_vectors_input;
+static sve_z_regs_t sve_vectors_output;
+static int sve_op_1[NS_SVE_OP_ARRAYSIZE];
+static int sve_op_2[NS_SVE_OP_ARRAYSIZE];
+static fpu_state_t g_fpu_state_write;
+static fpu_state_t g_fpu_state_read;
+
+/*
+ * Tests that SIMD vectors and FPU state are preserved during the context switches between
+ * normal world and the secure world.
+ * Fills the SIMD vectors, FPCR and FPSR with random values, requests SP to fill the vectors
+ * with a different values, request SP to check if secure SIMD context is restored.
+ * Checks that the NS context is restored on return.
+ */
+test_result_t test_simd_vectors_preserved(void)
+{
+ /**********************************************************************
+ * Verify that FF-A is there and that it has the correct version.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ fpu_state_write_rand(&g_fpu_state_write);
+ struct ffa_value ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = cactus_req_simd_compare_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Normal world verify its FPU/SIMD state registers data */
+ fpu_state_read(&g_fpu_state_read);
+ if (fpu_state_compare(&g_fpu_state_write, &g_fpu_state_read) != 0) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Tests that SVE vectors are preserved during the context switches between
+ * normal world and the secure world.
+ * Fills the SVE vectors with known values, requests SP to fill the vectors
+ * with a different values, checks that the context is restored on return.
+ */
+test_result_t test_sve_vectors_preserved(void)
+{
+ uint64_t vl;
+ uint8_t *sve_vector;
+
+ SKIP_TEST_IF_SVE_NOT_SUPPORTED();
+
+ /**********************************************************************
+ * Verify that FF-A is there and that it has the correct version.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /*
+ * Clear SVE vectors buffers used to compare the SVE state before calling
+ * into the Swd compared to SVE state restored after returning to NWd.
+ */
+ memset(sve_vectors_input, 0, sizeof(sve_vectors_input));
+ memset(sve_vectors_output, 0, sizeof(sve_vectors_output));
+
+ /* Set ZCR_EL2.LEN to implemented VL (constrained by EL3). */
+ write_zcr_el2(0xf);
+ isb();
+
+ /* Get the implemented VL. */
+ vl = sve_rdvl_1();
+
+ /* Fill each vector for the VL size with a fixed pattern. */
+ sve_vector = (uint8_t *) sve_vectors_input;
+ for (uint32_t vector_num = 0U; vector_num < SVE_NUM_VECTORS; vector_num++) {
+ memset(sve_vector, 0x11 * (vector_num + 1), vl);
+ sve_vector += vl;
+ }
+
+ /* Fill SVE vector registers with the buffer contents prepared above. */
+ sve_z_regs_write(&sve_vectors_input);
+
+ /*
+ * Call cactus secure partition which uses SIMD (and expect it doesn't
+ * affect the normal world state on return).
+ */
+ struct ffa_value ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Get the SVE vectors state after returning to normal world. */
+ sve_z_regs_read(&sve_vectors_output);
+
+ /* Compare to state before calling into secure world. */
+ if (sve_z_regs_compare(&sve_vectors_input, &sve_vectors_output) != 0UL) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Sends SIMD fill command to Cactus SP
+ * Returns:
+ * false - On success
+ * true - On failure
+ */
+#ifdef __aarch64__
+static bool callback_enter_cactus_sp(void)
+{
+ struct ffa_value ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ return true;
+ }
+
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ return true;
+ }
+
+ return false;
+}
+#endif /* __aarch64__ */
+
+/*
+ * Tests that SVE vector operations in normal world are not affected by context
+ * switches between normal world and the secure world.
+ */
+test_result_t test_sve_vectors_operations(void)
+{
+ unsigned int val;
+ bool cb_err;
+
+ SKIP_TEST_IF_SVE_NOT_SUPPORTED();
+
+ /**********************************************************************
+ * Verify that FF-A is there and that it has the correct version.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ val = 2 * SVE_TEST_ITERATIONS;
+
+ for (unsigned int i = 0; i < NS_SVE_OP_ARRAYSIZE; i++) {
+ sve_op_1[i] = val;
+ sve_op_2[i] = 1;
+ }
+
+ /* Set ZCR_EL2.LEN to implemented VL (constrained by EL3). */
+ write_zcr_el2(0xf);
+ isb();
+
+ for (unsigned int i = 0; i < SVE_TEST_ITERATIONS; i++) {
+ /* Perform SVE operations with intermittent calls to Swd. */
+ cb_err = sve_subtract_arrays_interleaved(sve_op_1, sve_op_1,
+ sve_op_2,
+ NS_SVE_OP_ARRAYSIZE,
+ &callback_enter_cactus_sp);
+ if (cb_err == true) {
+ ERROR("Callback to Cactus SP failed\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ }
+
+ /* Check result of SVE operations. */
+ for (unsigned int i = 0; i < NS_SVE_OP_ARRAYSIZE; i++) {
+ if (sve_op_1[i] != (val - SVE_TEST_ITERATIONS)) {
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_spm_smmu.c b/tftf/tests/runtime_services/secure_service/test_spm_smmu.c
index b041a976c..6237eb80b 100644
--- a/tftf/tests/runtime_services/secure_service/test_spm_smmu.c
+++ b/tftf/tests/runtime_services/secure_service/test_spm_smmu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,32 +7,173 @@
#include <cactus_test_cmds.h>
#include <debug.h>
#include <ffa_endpoints.h>
+#include <runtime_services/host_realm_managment/host_realm_rmi.h>
#include <smccc.h>
+#include <spm_test_helpers.h>
#include <test_helpers.h>
+#if PLAT_fvp || PLAT_tc
+#include <sp_platform_def.h>
static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
+#endif
+
+#define TEST_DMA_ENGINE_MEMCPY (2U)
+#define TEST_DMA_ENGINE_RAND48 (3U)
+
+/*
+ * Attribute encoding for Inner and Outer:
+ * Read-Allocate Write-Allocate Write-Back Normal Memory
+ */
+#define ATTR_ACACHE_RAWAWB_S (0xffU)
+#define ATTR_ACACHE_RAWAWB_NS (0x2ffU)
+
+/* Source attributes occupy the bottom halfword */
+#define DMA_ENGINE_ATTR_SRC_ACACHE_RAWAWB_S ATTR_ACACHE_RAWAWB_S
+#define DMA_ENGINE_ATTR_SRC_ACACHE_RAWAWB_NS ATTR_ACACHE_RAWAWB_NS
+
+/* Destination attributes occupy the top halfword */
+#define DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_S (ATTR_ACACHE_RAWAWB_S << 16)
+#define DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_NS (ATTR_ACACHE_RAWAWB_NS << 16)
/**************************************************************************
- * Send a command to SP1 initiate DMA service with the help of a peripheral
- * device upstream of an SMMUv3 IP
+ * test_smmu_spm
+ *
+ * Send commands to SP1 initiate DMA service with the help of a peripheral
+ * device upstream of an SMMUv3 IP.
+ * The scenario involves randomizing a secure buffer (first DMA operation),
+ * copying this buffer to another location (second DMA operation),
+ * and checking (by CPU) that both buffer contents match.
**************************************************************************/
test_result_t test_smmu_spm(void)
{
- smc_ret_values ret;
+#if PLAT_fvp || PLAT_tc
+ struct ffa_value ret;
/**********************************************************************
* Check SPMC has ffa_version and expected FFA endpoints are deployed.
**********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
- VERBOSE("Sending command to SP %x for initiating DMA transfer\n",
+ VERBOSE("Sending command to SP %x for initiating DMA transfer.\n",
SP_ID(1));
- ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1));
+ /*
+ * Randomize first half of a secure buffer from the secure world
+ * through the SMMU test engine DMA.
+ * Destination memory attributes are secure rawaWB.
+ */
+ ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+ TEST_DMA_ENGINE_RAND48,
+ PLAT_CACTUS_MEMCPY_BASE,
+ PLAT_CACTUS_MEMCPY_RANGE / 2,
+ DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_S);
+
+ /* Expect the SMMU DMA operation to pass. */
+ if (cactus_get_response(ret) != CACTUS_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Copy first half to second half of the buffer and
+ * check both match.
+ * Source and destination memory attributes are secure rawaWB.
+ */
+ ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+ TEST_DMA_ENGINE_MEMCPY,
+ PLAT_CACTUS_MEMCPY_BASE,
+ PLAT_CACTUS_MEMCPY_RANGE,
+ DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_S |
+ DMA_ENGINE_ATTR_SRC_ACACHE_RAWAWB_S);
+
+ /* Expect the SMMU DMA operation to pass. */
+ if (cactus_get_response(ret) != CACTUS_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Copy first half to second half of the non-secure buffer and
+ * check both match.
+ * Source and destination memory attributes are non-secure rawaWB.
+ * This test helps to validate a scenario where a secure stream
+ * belonging to Cactus SP accesses non-secure IPA space.
+ */
+ ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+ TEST_DMA_ENGINE_MEMCPY,
+ PLAT_CACTUS_NS_MEMCPY_BASE,
+ PLAT_CACTUS_MEMCPY_RANGE,
+ DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_NS |
+ DMA_ENGINE_ATTR_SRC_ACACHE_RAWAWB_NS);
+
+ /* Expect the SMMU DMA operation to pass. */
if (cactus_get_response(ret) != CACTUS_SUCCESS) {
return TEST_RESULT_FAIL;
}
return TEST_RESULT_SUCCESS;
+#else
+ return TEST_RESULT_SKIPPED;
+#endif
}
+/**************************************************************************
+ * test_smmu_spm_invalid_access
+ *
+ * The scenario changes a NS buffer PAS into Realm PAS. It then queries a SP
+ * to initiate a secure DMA operation on this buffer through the SMMU.
+ * The operation is expected to fail as a secure DMA transaction to a Realm
+ * region fails SMMU GPC checks.
+ **************************************************************************/
+test_result_t test_smmu_spm_invalid_access(void)
+{
+#if PLAT_fvp || PLAT_tc
+ struct ffa_value ret;
+ u_register_t retmm;
+
+ /* Skip this test if RME is not implemented. */
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /**********************************************************************
+ * Check SPMC has ffa_version and expected FFA endpoints are deployed.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ /* Update the NS buffer to Realm PAS. */
+ retmm = host_rmi_granule_delegate((u_register_t)PLAT_CACTUS_NS_MEMCPY_BASE);
+ if (retmm != 0UL) {
+ ERROR("Granule delegate failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ VERBOSE("Sending command to SP %x for initiating DMA transfer.\n",
+ SP_ID(1));
+
+ /*
+ * Attempt randomizing the buffer (now turned into Realm PAS)
+ * from the secure world through the SMMU test engine DMA.
+ * Destination memory attributes are non-secure rawaWB.
+ */
+ ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+ TEST_DMA_ENGINE_RAND48,
+ PLAT_CACTUS_NS_MEMCPY_BASE,
+ PLAT_CACTUS_MEMCPY_RANGE,
+ DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_NS);
+
+ /* Update the buffer back to NS PAS. */
+ retmm = host_rmi_granule_undelegate((u_register_t)PLAT_CACTUS_NS_MEMCPY_BASE);
+ if (retmm != 0UL) {
+ ERROR("Granule undelegate failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Expect the SMMU DMA operation to have failed. */
+ if (cactus_get_response(ret) != CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+#else
+ return TEST_RESULT_SKIPPED;
+#endif
+}
diff --git a/tftf/tests/runtime_services/standard_service/errata_abi/api_tests/test_errata_abi_functionality.c b/tftf/tests/runtime_services/standard_service/errata_abi/api_tests/test_errata_abi_functionality.c
new file mode 100644
index 000000000..0baf471d3
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/errata_abi/api_tests/test_errata_abi_functionality.c
@@ -0,0 +1,739 @@
+/*
+ * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <events.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smccc.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <platform_def.h>
+#include <string.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <errata_abi.h>
+
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+
+/* Forward flag */
+#define FORWARD_FLAG_EL1 0x00
+
+/* Extract revision and variant info */
+#define EXTRACT_REV_VAR(x) (x & MIDR_REV_MASK) | ((x >> (MIDR_VAR_SHIFT - MIDR_REV_BITS)) \
+ & MIDR_VAR_MASK)
+
+/* Extract the partnumber */
+#define EXTRACT_PARTNO(x) ((x >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+
+#define RXPX_RANGE(x, y, z) (((x >= y) && (x <= z)) ? true : false)
+
+/* Global pointer to point to individual cpu structs based on midr value */
+em_cpu_t *cpu_ptr;
+
+/*
+ * Errata list for CPUs. This list needs to be updated
+ * for every new errata added to the errata ABI list.
+ */
+em_cpu_t cortex_A15_errata_list = {
+ .cpu_pn = 0xC0F,
+ .cpu_errata = {
+ {816470, 0x30, 0xFF},
+ {827671, 0x30, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A17_errata_list = {
+ .cpu_pn = 0xC0E,
+ .cpu_errata = {
+ {852421, 0x00, 0x12},
+ {852423, 0x00, 0x12},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A9_errata_list = {
+ .cpu_pn = 0xC09,
+ .cpu_errata = {
+ {790473, 0x00, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A35_errata_list = {
+ .cpu_pn = 0xD04,
+ .cpu_errata = {
+ {855472, 0x00, 0x00},
+ {1234567, 0x00, 0x00},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A53_errata_list = {
+ .cpu_pn = 0xD03,
+ .cpu_errata = {
+ {819472, 0x00, 0x01},
+ {824069, 0x00, 0x02},
+ {826319, 0x00, 0x02},
+ {827319, 0x00, 0x02},
+ {835769, 0x00, 0x04},
+ {836870, 0x00, 0x03},
+ {843419, 0x00, 0x04},
+ {855873, 0x03, 0xFF},
+ {1530924, 0x00, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A55_errata_list = {
+ .cpu_pn = 0xD05,
+ .cpu_errata = {
+ {768277, 0x00, 0x00},
+ {778703, 0x00, 0x00},
+ {798797, 0x00, 0x00},
+ {846532, 0x00, 0x01},
+ {903758, 0x00, 0x01},
+ {1221012, 0x00, 0x10},
+ {1530923, 0x00, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A57_errata_list = {
+ .cpu_pn = 0xD07,
+ .cpu_errata = {
+ {806969, 0x00, 0x00},
+ {813419, 0x00, 0x00},
+ {813420, 0x00, 0x00},
+ {814670, 0x00, 0x00},
+ {817169, 0x00, 0x01},
+ {826974, 0x00, 0x11},
+ {826977, 0x00, 0x11},
+ {828024, 0x00, 0x11},
+ {829520, 0x00, 0x12},
+ {833471, 0x00, 0x12},
+ {859972, 0x00, 0x13},
+ {1319537, 0x00, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A72_errata_list = {
+ .cpu_pn = 0xD08,
+ .cpu_errata = {
+ {859971, 0x00, 0x03},
+ {1234567, 0x00, 0xFF},
+ {1319367, 0x00, 0xFF},
+ {9876543, 0x00, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A73_errata_list = {
+ .cpu_pn = 0xD09,
+ .cpu_errata = {
+ {852427, 0x00, 0x00},
+ {855423, 0x00, 0x01},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A75_errata_list = {
+ .cpu_pn = 0xD0A,
+ .cpu_errata = {
+ {764081, 0x00, 0x00},
+ {790748, 0x00, 0x00},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A76_errata_list = {
+ .cpu_pn = 0xD0B,
+ .cpu_errata = {
+ {1073348, 0x00, 0x10},
+ {1130799, 0x00, 0x20},
+ {1165522, 0x00, 0xFF},
+ {1220197, 0x00, 0x20},
+ {1257314, 0x00, 0x30},
+ {1262606, 0x00, 0x30},
+ {1262888, 0x00, 0x30},
+ {1275112, 0x00, 0x30},
+ {1286807, 0x00, 0x30},
+ {1791580, 0x00, 0x40},
+ {1868343, 0x00, 0x40},
+ {1946160, 0x30, 0x41},
+ {2743102, 0x00, 0x41},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A77_errata_list = {
+ .cpu_pn = 0xD0D,
+ .cpu_errata = {
+ {1508412, 0x00, 0x10},
+ {1791578, 0x00, 0x11},
+ {1800714, 0x00, 0x11},
+ {1925769, 0x00, 0x11},
+ {1946167, 0x00, 0x11},
+ {2356587, 0x00, 0x11},
+ {2743100, 0x00, 0x11},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A78_AE_errata_list = {
+ .cpu_pn = 0xD42,
+ .cpu_errata = {
+ {1941500, 0x00, 0x01},
+ {1951502, 0x00, 0x01},
+ {2376748, 0x00, 0x01},
+ {2712574, 0x00, 0x02},
+ {2395408, 0x00, 0x01},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A78_errata_list = {
+ .cpu_pn = 0xD41,
+ .cpu_errata = {
+ {1688305, 0x00, 0x10},
+ {1821534, 0x00, 0x10},
+ {1941498, 0x00, 0x11},
+ {1951500, 0x10, 0x11},
+ {1952683, 0x00, 0x00},
+ {2132060, 0x00, 0x12},
+ {2242635, 0x10, 0x12},
+ {2376745, 0x00, 0x12},
+ {2395406, 0x00, 0x12},
+ {2712571, 0x00, 0x12},
+ {2742426, 0x00, 0x12},
+ {2772019, 0x00, 0x12},
+ {2779479, 0x00, 0x12},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A78C_errata_list = {
+ .cpu_pn = 0xD4B,
+ .cpu_errata = {
+ {1827430, 0x00, 0x00},
+ {1827440, 0x00, 0x00},
+ {2132064, 0x01, 0x02},
+ {2242638, 0x01, 0x02},
+ {2376749, 0x01, 0x02},
+ {2395411, 0x01, 0x02},
+ {2683027, 0x01, 0x02},
+ {2712575, 0x01, 0x02},
+ {2743232, 0x01, 0x02},
+ {2772121, 0x00, 0x02},
+ {2779484, 0x01, 0x02},
+ {-1}
+ },
+};
+
+
+em_cpu_t cortex_X1_errata_list = {
+ .cpu_pn = 0xD44,
+ .cpu_errata = {
+ {1688305, 0x00, 0x10},
+ {1821534, 0x00, 0x10},
+ {1827429, 0x00, 0x10},
+ {-1}
+ },
+
+};
+
+em_cpu_t neoverse_N1_errata_list = {
+ .cpu_pn = 0xD0C,
+ .cpu_errata = {
+ {1043202, 0x00, 0x10},
+ {1073348, 0x00, 0x10},
+ {1130799, 0x00, 0x20},
+ {1165347, 0x00, 0x20},
+ {1207823, 0x00, 0x20},
+ {1220197, 0x00, 0x20},
+ {1257314, 0x00, 0x30},
+ {1262606, 0x00, 0x30},
+ {1262888, 0x00, 0x30},
+ {1275112, 0x00, 0x30},
+ {1315703, 0x00, 0x30},
+ {1542419, 0x30, 0x40},
+ {1868343, 0x00, 0x40},
+ {1946160, 0x30, 0x41},
+ {2743102, 0x00, 0x41},
+ {-1}
+ },
+};
+
+em_cpu_t neoverse_V1_errata_list = {
+ .cpu_pn = 0xD40,
+ .cpu_errata = {
+ {1618635, 0x00, 0x0F},
+ {1774420, 0x00, 0x10},
+ {1791573, 0x00, 0x10},
+ {1852267, 0x00, 0x10},
+ {1925756, 0x00, 0x11},
+ {1940577, 0x10, 0x11},
+ {1966096, 0x10, 0x11},
+ {2108267, 0x00, 0x11},
+ {2139242, 0x00, 0x11},
+ {2216392, 0x10, 0x11},
+ {2294912, 0x00, 0x11},
+ {2348377, 0x00, 0x11},
+ {2372203, 0x00, 0x11},
+ {2701953, 0x00, 0x11},
+ {2743093, 0x00, 0x12},
+ {2743233, 0x00, 0x12},
+ {2779461, 0x00, 0x12},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A710_errata_list = {
+ .cpu_pn = 0xD47,
+ .cpu_errata = {
+ {1987031, 0x00, 0x20},
+ {2008768, 0x00, 0x20},
+ {2017096, 0x00, 0x20},
+ {2055002, 0x10, 0x20},
+ {2058056, 0x00, 0x10},
+ {2081180, 0x00, 0x20},
+ {2083908, 0x20, 0x20},
+ {2136059, 0x00, 0x20},
+ {2147715, 0x20, 0x20},
+ {2216384, 0x00, 0x20},
+ {2267065, 0x00, 0x20},
+ {2282622, 0x00, 0x21},
+ {2291219, 0x00, 0x20},
+ {2371105, 0x00, 0x20},
+ {2701952, 0x00, 0x21},
+ {2742423, 0x00, 0x21},
+ {2768515, 0x00, 0x21},
+ {2778471, 0x00, 0x21},
+ {-1}
+ },
+};
+
+em_cpu_t neoverse_N2_errata_list = {
+ .cpu_pn = 0xD49,
+ .cpu_errata = {
+ {2002655, 0x00, 0x00},
+ {2025414, 0x00, 0x00},
+ {2067956, 0x00, 0x00},
+ {2138953, 0x00, 0x00},
+ {2138956, 0x00, 0x00},
+ {2138958, 0x00, 0x00},
+ {2189731, 0x00, 0x00},
+ {2242400, 0x00, 0x00},
+ {2242415, 0x00, 0x00},
+ {2280757, 0x00, 0x00},
+ {2326639, 0x00, 0x00},
+ {2376738, 0x00, 0x00},
+ {2388450, 0x00, 0x00},
+ {2728475, 0x00, 0x02},
+ {2743089, 0x00, 0x02},
+ {2779511, 0x00, 0x02},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_X2_errata_list = {
+ .cpu_pn = 0xD48,
+ .cpu_errata = {
+ {2002765, 0x00, 0x20},
+ {2017096, 0x00, 0x20},
+ {2058056, 0x00, 0x20},
+ {2081180, 0x00, 0x20},
+ {2083908, 0x00, 0x20},
+ {2147715, 0x20, 0x20},
+ {2216384, 0x00, 0x20},
+ {2282622, 0x00, 0x21},
+ {2371105, 0x00, 0x21},
+ {2701952, 0x00, 0x21},
+ {2742423, 0x00, 0x21},
+ {2768515, 0x00, 0x21},
+ {2778471, 0x00, 0x21},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A510_errata_list = {
+ .cpu_pn = 0xD46,
+ .cpu_errata = {
+ {1922240, 0x00, 0x00},
+ {2041909, 0x02, 0x02},
+ {2042739, 0x00, 0x02},
+ {2080326, 0x02, 0x02},
+ {2172148, 0x00, 0x10},
+ {2218950, 0x00, 0x10},
+ {2250311, 0x00, 0x10},
+ {2288014, 0x00, 0x10},
+ {2347730, 0x00, 0x11},
+ {2371937, 0x00, 0x11},
+ {2666669, 0x00, 0x11},
+ {2684597, 0x00, 0x12},
+ {1234567, 0x00, 0x12},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_X4_errata_list = {
+ .cpu_pn = 0xD82,
+ .cpu_errata = {
+ {2701112, 0x00, 0x00},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A715_errata_list = {
+ .cpu_pn = 0xD4D,
+ .cpu_errata = {
+ {2561034, 0x10, 0x10},
+ {-1}
+ },
+};
+
+em_cpu_t neoverse_V2_errata_list = {
+ .cpu_pn = 0xD4F,
+ .cpu_errata = {
+ {2331132, 0x00, 0x02},
+ {2618597, 0x00, 0x01},
+ {2662553, 0x00, 0x01},
+ {2719103, 0x00, 0x01},
+ {2719103, 0x00, 0x01},
+ {2719105, 0x00, 0x01},
+ {2743011, 0x00, 0x01},
+ {2779510, 0x00, 0x01},
+ {2801372, 0x00, 0x01},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_X3_errata_list = {
+ .cpu_pn = 0xD4E,
+ .cpu_errata = {
+ {2070301, 0x00, 0x12},
+ {2266875, 0x00, 0x10},
+ {2302506, 0x00, 0x11},
+ {2313909, 0x00, 0x10},
+ {2615812, 0x00, 0x11},
+ {2641945, 0x00, 0x10},
+ {2701951, 0x00, 0x11},
+ {2742421, 0x00, 0x11},
+ {2743088, 0x00, 0x11},
+ {2779509, 0x00, 0x11},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A520_errata_list = {
+ .cpu_pn = 0xD80,
+ .cpu_errata = {
+ {2630792, 0x00, 0x01},
+ {2858100, 0x00, 0x01},
+ {-1}
+ },
+};
+
+/*
+ * Test function checks for the em_version implemented
+ * - Test fails if the version returned is < 1.0.
+ * - Test passes if the version returned is == 1.0
+ */
+test_result_t test_em_version(void)
+{
+ int32_t version_return = tftf_em_abi_version();
+
+ if (version_return == EM_ABI_VERSION(1, 0))
+ return TEST_RESULT_SUCCESS;
+
+ if (version_return == EM_NOT_SUPPORTED)
+ return TEST_RESULT_SKIPPED;
+
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * Test function checks for the em_features implemented
+ * Test fails if the em_feature is not implemented
+ * or if the fid is invalid.
+ */
+
+test_result_t test_em_features(void)
+{
+ int32_t version_return = tftf_em_abi_version();
+
+ if (version_return == EM_NOT_SUPPORTED)
+ return TEST_RESULT_SKIPPED;
+
+ if (!(tftf_em_abi_feature_implemented(EM_CPU_ERRATUM_FEATURES)))
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test function checks for the em_cpu_feature implemented
+ * Test fails if the em_cpu_feature is not implemented
+ * or if the fid is invalid.
+ */
+test_result_t test_em_cpu_features(void)
+{
+ test_result_t return_val = TEST_RESULT_FAIL;
+ smc_ret_values ret_val;
+
+ uint32_t midr_val = read_midr();
+ uint16_t rxpx_val_extracted = EXTRACT_REV_VAR(midr_val);
+ midr_val = EXTRACT_PARTNO(midr_val);
+
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ INFO("Partnum extracted = %x and rxpx extracted val = %x\n\n", midr_val, \
+ rxpx_val_extracted);
+ switch (midr_val) {
+ case 0xD09:
+ {
+ VERBOSE("MIDR matches A73 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A73_errata_list;
+ break;
+ }
+ case 0xD0B:
+ {
+ VERBOSE("MIDR matches A76 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A76_errata_list;
+ break;
+ }
+ case 0xD4D:
+ {
+ VERBOSE("MIDR matches A715 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A715_errata_list;
+ break;
+ }
+ case 0xD04:
+ {
+ VERBOSE("MIDR matches A35 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A35_errata_list;
+ break;
+ }
+ case 0xD03:
+ {
+ VERBOSE("MIDR matches A53 = %x\n", midr_val);
+ cpu_ptr = &cortex_A53_errata_list;
+ break;
+ }
+ case 0xD07:
+ {
+ VERBOSE("MIDR matches A57 = %x\n", midr_val);
+ cpu_ptr = &cortex_A57_errata_list;
+ break;
+ }
+ case 0xD08:
+ {
+ VERBOSE("MIDR matches A72 = %x\n", midr_val);
+ cpu_ptr = &cortex_A72_errata_list;
+ break;
+ }
+ case 0xD0D:
+ {
+ VERBOSE("MIDR matches A77 = %x\n", midr_val);
+ cpu_ptr = &cortex_A77_errata_list;
+ break;
+ }
+ case 0xD41:
+ {
+ VERBOSE("MIDR matches A78 = %x\n", midr_val);
+ cpu_ptr = &cortex_A78_errata_list;
+ break;
+ }
+ case 0xD0C:
+ {
+ VERBOSE("MIDR matches Neoverse N1 = %x\n", midr_val);
+ cpu_ptr = &neoverse_N1_errata_list;
+ break;
+ }
+ case 0xD4B:
+ {
+ VERBOSE("MIDR matches A78C = %x\n", midr_val);
+ cpu_ptr = &cortex_A78C_errata_list;
+ break;
+ }
+ case 0xD4F:
+ {
+ VERBOSE("MIDR matches Neoverse V2 -> %x\n", midr_val);
+ cpu_ptr = &neoverse_V2_errata_list;
+ break;
+ }
+ case 0xD47:
+ {
+ VERBOSE("MIDR matches A710 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A710_errata_list;
+ break;
+ }
+ case 0xD46:
+ {
+ VERBOSE("MIDR matches A510 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A510_errata_list;
+ break;
+ }
+ case 0xD48:
+ {
+ VERBOSE("MIDR matches X2 -> %x\n", midr_val);
+ cpu_ptr = &cortex_X2_errata_list;
+ break;
+ }
+ case 0xD49:
+ {
+ VERBOSE("MIDR matches Neoverse N2 -> %x\n", midr_val);
+ cpu_ptr = &neoverse_N2_errata_list;
+ break;
+ }
+ case 0xD40:
+ {
+ VERBOSE("MIDR matches Neoverse V1 -> %x\n", midr_val);
+ cpu_ptr = &neoverse_V1_errata_list;
+ break;
+ }
+ case 0xD44:
+ {
+ VERBOSE("MIDR matches X1 -> %x\n", midr_val);
+ cpu_ptr = &cortex_X1_errata_list;
+ break;
+ }
+ case 0xD0A:
+ {
+ VERBOSE("MIDR matches A75 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A75_errata_list;
+ break;
+ }
+ case 0xD05:
+ {
+ VERBOSE("MIDR matches A55 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A55_errata_list;
+ break;
+ }
+ case 0xD42:
+ {
+ VERBOSE("MIDR matches A78_AE -> %x\n", midr_val);
+ cpu_ptr = &cortex_A78_AE_errata_list;
+ break;
+ }
+ case 0xD82:
+ {
+ VERBOSE("MIDR matches Cortex-X4 -> %x\n", midr_val);
+ cpu_ptr = &cortex_X4_errata_list;
+ break;
+ }
+ case 0xD4E:
+ {
+ VERBOSE("MIDR matches Cortex-X3 -> %x\n", midr_val);
+ cpu_ptr = &cortex_X3_errata_list;
+ break;
+ }
+ case 0xD80:
+ {
+ VERBOSE("MIDR matches A520 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A520_errata_list;
+ break;
+ }
+ default:
+ {
+ ERROR("MIDR did not match any cpu\n");
+ return TEST_RESULT_SKIPPED;
+ break;
+ }
+ }
+
+ for (int i = 0; i < ERRATA_COUNT && cpu_ptr->cpu_errata[i].em_errata_id != -1; i++) {
+
+ ret_val = tftf_em_abi_cpu_feature_implemented \
+ (cpu_ptr->cpu_errata[i].em_errata_id, \
+ FORWARD_FLAG_EL1);
+
+ switch (ret_val.ret0) {
+
+ case EM_NOT_AFFECTED:
+ {
+ return_val = (RXPX_RANGE(rxpx_val_extracted, \
+ cpu_ptr->cpu_errata[i].rxpx_low, cpu_ptr->cpu_errata[i].rxpx_high) \
+ == false) ? TEST_RESULT_SUCCESS : TEST_RESULT_FAIL;
+ break;
+ }
+ case EM_AFFECTED:
+ {
+ return_val = TEST_RESULT_SUCCESS;
+ break;
+ }
+ case EM_HIGHER_EL_MITIGATION:
+ {
+ return_val = (RXPX_RANGE(rxpx_val_extracted, \
+ cpu_ptr->cpu_errata[i].rxpx_low, cpu_ptr->cpu_errata[i].rxpx_high) \
+ == true) ? TEST_RESULT_SUCCESS : TEST_RESULT_FAIL;
+ break;
+ }
+ case EM_UNKNOWN_ERRATUM:
+ {
+ return_val = TEST_RESULT_SUCCESS;
+ break;
+ }
+ default:
+ {
+ ERROR("Return value did not match the expected returns\n");
+ return_val = TEST_RESULT_FAIL;
+ break;
+ }
+ }
+ INFO("errata_id = %d and test_em_cpu_erratum_features = %ld\n",\
+ cpu_ptr->cpu_errata[i].em_errata_id, ret_val.ret0);
+ }
+ /* Signal to the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+ return return_val;
+}
+
+test_result_t test_errata_abi_features(void)
+{
+ unsigned int lead_mpid;
+ unsigned int cpu_mpid, cpu_node, core_pos;
+ int psci_ret;
+
+ int32_t version_return = tftf_em_abi_version();
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(1);
+
+ if (version_return == EM_NOT_SUPPORTED) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (!(tftf_em_abi_feature_implemented(EM_CPU_ERRATUM_FEATURES))) {
+ return TEST_RESULT_FAIL;
+ }
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t)test_em_cpu_features, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d)\n", \
+ cpu_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+ }
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c
index 9e9998c9c..7da63caaa 100644
--- a/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c
@@ -20,13 +20,15 @@
#include <timer.h>
/*
- * Desired affinity level and state type (standby or powerdown) for the next
- * CPU_SUSPEND operation. We need these shared variables because there is no way
- * to pass arguments to non-lead CPUs...
+ * Desired affinity level, state type (standby or powerdown), and entry time for
+ * each CPU in the next CPU_SUSPEND operation. We need these shared variables
+ * because there is no way to pass arguments to non-lead CPUs...
*/
-static unsigned int test_aff_level;
-static unsigned int test_suspend_type;
+static unsigned int test_aff_level[PLATFORM_CORE_COUNT];
+static unsigned int test_suspend_type[PLATFORM_CORE_COUNT];
+static unsigned int test_suspend_entry_time[PLATFORM_CORE_COUNT];
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
static event_t cpu_ready[PLATFORM_CORE_COUNT];
/*
@@ -53,6 +55,38 @@ static int requested_irq_handler(void *data)
return 0;
}
+static test_result_t test_init(unsigned int aff_level,
+ unsigned int suspend_type)
+{
+ if (aff_level > MPIDR_MAX_AFFLVL)
+ return TEST_RESULT_SKIPPED;
+
+ assert((suspend_type == PSTATE_TYPE_POWERDOWN) ||
+ (suspend_type == PSTATE_TYPE_STANDBY));
+
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i) {
+ /* Export these variables for the non-lead CPUs */
+ test_aff_level[i] = aff_level;
+ test_suspend_type[i] = suspend_type;
+ test_suspend_entry_time[i] =
+ PLAT_SUSPEND_ENTRY_TIME * PLATFORM_CORE_COUNT;
+
+ /*
+ * All testcases in this file use the same arrays so it needs to
+ * be re-initialised each time.
+ */
+ tftf_init_event(&cpu_booted[i]);
+ tftf_init_event(&cpu_ready[i]);
+ tftf_init_event(&event_received_wake_irq[i]);
+ requested_irq_received[i] = 0;
+ }
+
+ /* Ensure the above writes are seen before any read */
+ dmbsy();
+
+ return TEST_RESULT_SUCCESS;
+}
+
/*
* Suspend the calling (non-lead) CPU.
* 1) Program a wake-up event to come out of suspend state
@@ -64,21 +98,26 @@ static test_result_t suspend_non_lead_cpu(void)
{
unsigned int mpid = read_mpidr_el1();
unsigned int core_pos = platform_get_core_pos(mpid);
+ unsigned int aff_level = test_aff_level[core_pos];
+ unsigned int suspend_type = test_suspend_type[core_pos];
uint32_t power_state, stateid;
int rc, expected_return_val;
u_register_t flags;
tftf_timer_register_handler(requested_irq_handler);
- /* Tell the lead CPU that the calling CPU is about to suspend itself */
- tftf_send_event(&cpu_ready[core_pos]);
+ /* Signal to the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ /* Wait for signal from the lead CPU before suspending itself */
+ tftf_wait_for_event(&cpu_ready[core_pos]);
/* IRQs need to be disabled prior to programming the timer */
/* Preserve DAIF flags*/
flags = read_daif();
disable_irq();
- rc = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME);
+ rc = tftf_program_timer(test_suspend_entry_time[core_pos]);
if (rc != 0) {
/* Restore previous DAIF flags */
write_daif(flags);
@@ -87,15 +126,14 @@ static test_result_t suspend_non_lead_cpu(void)
return TEST_RESULT_FAIL;
}
- expected_return_val = tftf_psci_make_composite_state_id(test_aff_level,
- test_suspend_type, &stateid);
+ expected_return_val = tftf_psci_make_composite_state_id(aff_level,
+ suspend_type,
+ &stateid);
/*
* Suspend the calling CPU to the desired affinity level and power state
*/
- power_state = tftf_make_psci_pstate(test_aff_level,
- test_suspend_type,
- stateid);
+ power_state = tftf_make_psci_pstate(aff_level, suspend_type, stateid);
rc = tftf_cpu_suspend(power_state);
/* Restore previous DAIF flags */
@@ -126,38 +164,17 @@ static test_result_t suspend_non_lead_cpu(void)
*
* The test is skipped if an error occurs during the bring-up of non-lead CPUs.
*/
-static test_result_t test_psci_suspend(unsigned int aff_level,
- unsigned int suspend_type)
+static test_result_t test_psci_suspend(void)
{
unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
unsigned int target_mpid, target_node;
unsigned int core_pos;
+ unsigned int aff_level, suspend_type;
uint32_t power_state, stateid;
int rc, expected_return_val;
+ int aff_info;
u_register_t flags;
- if (aff_level > MPIDR_MAX_AFFLVL)
- return TEST_RESULT_SKIPPED;
-
- assert((suspend_type == PSTATE_TYPE_POWERDOWN) ||
- (suspend_type == PSTATE_TYPE_STANDBY));
-
- /* Export these variables for the non-lead CPUs */
- test_aff_level = aff_level;
- test_suspend_type = suspend_type;
-
- /*
- * All testcases in this file use the same cpu_ready[] array so it needs
- * to be re-initialised each time.
- */
- for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i) {
- tftf_init_event(&cpu_ready[i]);
- tftf_init_event(&event_received_wake_irq[i]);
- requested_irq_received[i] = 0;
- }
- /* Ensure the above writes are seen before any read */
- dmbsy();
-
/*
* Preparation step: Power on all cores.
*/
@@ -168,8 +185,8 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
continue;
rc = tftf_cpu_on(target_mpid,
- (uintptr_t) suspend_non_lead_cpu,
- 0);
+ (uintptr_t) suspend_non_lead_cpu,
+ 0);
if (rc != PSCI_E_SUCCESS) {
tftf_testcase_printf(
"Failed to power on CPU 0x%x (%d)\n",
@@ -178,7 +195,7 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
}
}
- /* Wait for all non-lead CPUs to be ready */
+ /* Wait for all non-lead CPUs to enter the test */
for_each_cpu(target_node) {
target_mpid = tftf_get_mpidr_from_node(target_node);
/* Skip lead CPU */
@@ -186,7 +203,19 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
continue;
core_pos = platform_get_core_pos(target_mpid);
- tftf_wait_for_event(&cpu_ready[core_pos]);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+
+ /* Signal to each non-lead CPU to suspend itself */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_send_event(&cpu_ready[core_pos]);
+ waitms(PLAT_SUSPEND_ENTRY_TIME);
}
/* IRQs need to be disabled prior to programming the timer */
@@ -198,7 +227,7 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
* Program the timer, this will serve as the
* wake-up event to come out of suspend state.
*/
- rc = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME);
+ rc = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME * PLATFORM_CORE_COUNT);
if (rc) {
/* Restore previous DAIF flags */
write_daif(flags);
@@ -207,16 +236,18 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
return TEST_RESULT_FAIL;
}
- expected_return_val = tftf_psci_make_composite_state_id(test_aff_level,
- test_suspend_type, &stateid);
+ core_pos = platform_get_core_pos(lead_mpid);
+ aff_level = test_aff_level[core_pos];
+ suspend_type = test_suspend_type[core_pos];
+ expected_return_val = tftf_psci_make_composite_state_id(aff_level,
+ suspend_type,
+ &stateid);
/*
* Suspend the calling CPU to the desired affinity level and power state
*/
- power_state = tftf_make_psci_pstate(test_aff_level,
- test_suspend_type,
- stateid);
- if (test_aff_level >= PSTATE_AFF_LVL_2)
+ power_state = tftf_make_psci_pstate(aff_level, suspend_type, stateid);
+ if (aff_level >= PSTATE_AFF_LVL_2)
rc = tftf_cpu_suspend_save_sys_ctx(power_state);
else
rc = tftf_cpu_suspend(power_state);
@@ -246,6 +277,19 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
tftf_wait_for_event(&event_received_wake_irq[core_pos]);
}
+ /* Wait for all non-lead CPUs to power down */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ do {
+ aff_info = tftf_psci_affinity_info(target_mpid,
+ MPIDR_AFFLVL0);
+ } while (aff_info != PSCI_STATE_OFF);
+ }
+
if (rc == expected_return_val)
return TEST_RESULT_SUCCESS;
@@ -255,11 +299,27 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
}
/*
+ * @Test_Aim@ Suspend to the specified suspend type targeted at the specified
+ * affinity level
+ */
+static test_result_t test_psci_suspend_level(unsigned int aff_level,
+ unsigned int suspend_type)
+{
+ int rc;
+
+ rc = test_init(aff_level, suspend_type);
+ if (rc != TEST_RESULT_SUCCESS)
+ return rc;
+
+ return test_psci_suspend();
+}
+
+/*
* @Test_Aim@ Suspend to powerdown state targeted at affinity level 0
*/
test_result_t test_psci_suspend_powerdown_level0(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_0, PSTATE_TYPE_POWERDOWN);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_0, PSTATE_TYPE_POWERDOWN);
}
/*
@@ -267,7 +327,7 @@ test_result_t test_psci_suspend_powerdown_level0(void)
*/
test_result_t test_psci_suspend_standby_level0(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_0, PSTATE_TYPE_STANDBY);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_0, PSTATE_TYPE_STANDBY);
}
/*
@@ -275,7 +335,7 @@ test_result_t test_psci_suspend_standby_level0(void)
*/
test_result_t test_psci_suspend_powerdown_level1(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_1, PSTATE_TYPE_POWERDOWN);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_1, PSTATE_TYPE_POWERDOWN);
}
/*
@@ -283,7 +343,7 @@ test_result_t test_psci_suspend_powerdown_level1(void)
*/
test_result_t test_psci_suspend_standby_level1(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_1, PSTATE_TYPE_STANDBY);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_1, PSTATE_TYPE_STANDBY);
}
/*
@@ -291,7 +351,7 @@ test_result_t test_psci_suspend_standby_level1(void)
*/
test_result_t test_psci_suspend_powerdown_level2(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_2, PSTATE_TYPE_POWERDOWN);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_2, PSTATE_TYPE_POWERDOWN);
}
/*
@@ -299,7 +359,7 @@ test_result_t test_psci_suspend_powerdown_level2(void)
*/
test_result_t test_psci_suspend_standby_level2(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_2, PSTATE_TYPE_STANDBY);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_2, PSTATE_TYPE_STANDBY);
}
/*
@@ -307,7 +367,7 @@ test_result_t test_psci_suspend_standby_level2(void)
*/
test_result_t test_psci_suspend_powerdown_level3(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_3, PSTATE_TYPE_POWERDOWN);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_3, PSTATE_TYPE_POWERDOWN);
}
/*
@@ -315,5 +375,317 @@ test_result_t test_psci_suspend_powerdown_level3(void)
*/
test_result_t test_psci_suspend_standby_level3(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_3, PSTATE_TYPE_STANDBY);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_3, PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to the specified suspend type targeted at affinity level 0
+ * in OS-initiated mode
+ */
+static test_result_t test_psci_suspend_level0_osi(unsigned int suspend_type)
+{
+ int err, rc;
+
+ err = tftf_psci_set_suspend_mode(PSCI_OS_INIT);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ rc = test_psci_suspend_level(PSTATE_AFF_LVL_0, suspend_type);
+
+ err = tftf_psci_set_suspend_mode(PSCI_PLAT_COORD);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return rc;
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 0 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_powerdown_level0_osi(void)
+{
+ return test_psci_suspend_level0_osi(PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 0 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_standby_level0_osi(void)
+{
+ return test_psci_suspend_level0_osi(PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to the specified suspend type targeted at affinity level 1
+ * in OS-initiated mode
+ */
+static test_result_t test_psci_suspend_level1_osi(unsigned int suspend_type)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int lead_lvl_1_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_1);
+ unsigned int target_mpid, target_node, lvl_1_node, lvl_1_end_node;
+ unsigned int core_pos;
+ tftf_pwr_domain_node_t pd_node;
+ int err, rc;
+
+ err = test_init(PSTATE_AFF_LVL_1, suspend_type);
+ if (err != TEST_RESULT_SUCCESS)
+ return err;
+
+ err = tftf_psci_set_suspend_mode(PSCI_OS_INIT);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ for_each_power_domain_idx(lvl_1_node, PSTATE_AFF_LVL_1) {
+ pd_node = tftf_pd_nodes[lvl_1_node];
+ lvl_1_end_node = pd_node.cpu_start_node + pd_node.ncpus - 1;
+
+ for_each_cpu_in_power_domain(target_node, lvl_1_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU as it is already on */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ if (target_node == lvl_1_end_node &&
+ lvl_1_node != lead_lvl_1_node) {
+ test_aff_level[core_pos] = PSTATE_AFF_LVL_1;
+ } else {
+ test_aff_level[core_pos] = PSTATE_AFF_LVL_0;
+ }
+ }
+ }
+
+ rc = test_psci_suspend();
+
+ err = tftf_psci_set_suspend_mode(PSCI_PLAT_COORD);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return rc;
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 1 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_powerdown_level1_osi(void)
+{
+ return test_psci_suspend_level1_osi(PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 1 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_standby_level1_osi(void)
+{
+ return test_psci_suspend_level1_osi(PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to the specified suspend type targeted at affinity level 2
+ * in OS-initiated mode
+ */
+static test_result_t test_psci_suspend_level2_osi(unsigned int suspend_type)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int lead_lvl_1_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_1);
+ unsigned int lead_lvl_2_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_2);
+ unsigned int target_mpid, target_node;
+ unsigned int lvl_1_node, lvl_2_node;
+ unsigned int lvl_1_end_node, lvl_2_end_node;
+ unsigned int core_pos;
+ tftf_pwr_domain_node_t lvl_1_pd_node, lvl_2_pd_node;
+ int err, rc;
+
+ err = test_init(PSTATE_AFF_LVL_2, suspend_type);
+ if (err != TEST_RESULT_SUCCESS)
+ return err;
+
+ err = tftf_psci_set_suspend_mode(PSCI_OS_INIT);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ for_each_power_domain_idx(lvl_2_node, PSTATE_AFF_LVL_2) {
+ lvl_2_pd_node = tftf_pd_nodes[lvl_2_node];
+ lvl_2_end_node =
+ lvl_2_pd_node.cpu_start_node + lvl_2_pd_node.ncpus - 1;
+
+ for_each_power_domain_idx(lvl_1_node, PSTATE_AFF_LVL_1) {
+ lvl_1_pd_node = tftf_pd_nodes[lvl_1_node];
+ if (lvl_1_pd_node.parent_node != lvl_2_node)
+ continue;
+
+ lvl_1_end_node =
+ lvl_1_pd_node.cpu_start_node +
+ lvl_1_pd_node.ncpus - 1;
+
+ for_each_cpu_in_power_domain(target_node, lvl_1_node) {
+ target_mpid =
+ tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU as it is already on */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ if (target_node == lvl_1_end_node &&
+ target_node == lvl_2_end_node &&
+ lvl_2_node != lead_lvl_2_node) {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_2;
+ } else if (target_node == lvl_1_end_node &&
+ lvl_1_node != lead_lvl_1_node) {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_1;
+ } else {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_0;
+ }
+ }
+ }
+
+ }
+
+ rc = test_psci_suspend();
+
+ err = tftf_psci_set_suspend_mode(PSCI_PLAT_COORD);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return rc;
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 2 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_powerdown_level2_osi(void)
+{
+ return test_psci_suspend_level2_osi(PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 2 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_standby_level2_osi(void)
+{
+ return test_psci_suspend_level2_osi(PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to the specified suspend type targeted at affinity level 3
+ * in OS-initiated mode
+ */
+static test_result_t test_psci_suspend_level3_osi(unsigned int suspend_type)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int lead_lvl_1_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_1);
+ unsigned int lead_lvl_2_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_2);
+ unsigned int lead_lvl_3_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_3);
+ unsigned int target_mpid, target_node;
+ unsigned int lvl_1_node, lvl_2_node, lvl_3_node;
+ unsigned int lvl_1_end_node, lvl_2_end_node, lvl_3_end_node;
+ unsigned int core_pos;
+ tftf_pwr_domain_node_t lvl_1_pd_node, lvl_2_pd_node, lvl_3_pd_node;
+ int err, rc;
+
+ err = test_init(PSTATE_AFF_LVL_3, suspend_type);
+ if (err != TEST_RESULT_SUCCESS)
+ return err;
+
+ err = tftf_psci_set_suspend_mode(PSCI_OS_INIT);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ for_each_power_domain_idx(lvl_3_node, PSTATE_AFF_LVL_3) {
+ lvl_3_pd_node = tftf_pd_nodes[lvl_3_node];
+ lvl_3_end_node =
+ lvl_3_pd_node.cpu_start_node + lvl_3_pd_node.ncpus - 1;
+
+ for_each_power_domain_idx(lvl_2_node, PSTATE_AFF_LVL_2) {
+ lvl_2_pd_node = tftf_pd_nodes[lvl_2_node];
+ if (lvl_2_pd_node.parent_node != lvl_3_node)
+ continue;
+
+ lvl_2_end_node =
+ lvl_2_pd_node.cpu_start_node + lvl_2_pd_node.ncpus - 1;
+
+ for_each_power_domain_idx(lvl_1_node, PSTATE_AFF_LVL_1) {
+ lvl_1_pd_node = tftf_pd_nodes[lvl_1_node];
+ if (lvl_1_pd_node.parent_node != lvl_2_node)
+ continue;
+
+ lvl_1_end_node =
+ lvl_1_pd_node.cpu_start_node +
+ lvl_1_pd_node.ncpus - 1;
+
+ for_each_cpu_in_power_domain(target_node, lvl_1_node) {
+ target_mpid =
+ tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU as it is already on */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ if (target_node == lvl_1_end_node &&
+ target_node == lvl_2_end_node &&
+ target_node == lvl_3_end_node &&
+ lvl_3_node != lead_lvl_3_node) {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_3;
+ }
+ else if (target_node == lvl_1_end_node &&
+ target_node == lvl_2_end_node &&
+ lvl_2_node != lead_lvl_2_node) {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_2;
+ } else if (target_node == lvl_1_end_node &&
+ lvl_1_node != lead_lvl_1_node) {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_1;
+ } else {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_0;
+ }
+ }
+ }
+
+ }
+ }
+
+ rc = test_psci_suspend();
+
+ err = tftf_psci_set_suspend_mode(PSCI_PLAT_COORD);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return rc;
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 3 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_powerdown_level3_osi(void)
+{
+ return test_psci_suspend_level3_osi(PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 3 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_standby_level3_osi(void)
+{
+ return test_psci_suspend_level3_osi(PSTATE_TYPE_STANDBY);
}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c
index 4732796c1..2a1e9e70c 100644
--- a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c
@@ -30,12 +30,16 @@ typedef struct psci_stat_data {
u_register_t residency;
} psci_stat_data_t;
-/* Assuming 3 power levels as maximum */
+/* Assuming 4 power levels as maximum */
#define MAX_STAT_STATES (PLAT_MAX_PWR_STATES_PER_LVL * \
PLAT_MAX_PWR_STATES_PER_LVL * \
+ PLAT_MAX_PWR_STATES_PER_LVL * \
PLAT_MAX_PWR_STATES_PER_LVL)
-CASSERT(PLAT_MAX_PWR_LEVEL <= 2, assert_maximum_defined_stat_array_size_exceeded);
+/* Based on PSCI_MAX_PWR_LVL in tf-a
+ * See: https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/tree/include/lib/psci/psci.h#n38
+ */
+CASSERT(PLAT_MAX_PWR_LEVEL <= 3, assert_maximum_defined_stat_array_size_exceeded);
/*
* The data structure holding stat information as queried by each CPU.
diff --git a/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c
index 99f685406..58ece719d 100644
--- a/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c
+++ b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c
@@ -47,27 +47,6 @@ static unsigned int include_cpu_suspend;
static test_result_t secondary_cpu_on_race_test(void);
/*
- * Utility function to wait for all CPUs other than the caller to be
- * OFF.
- */
-static void wait_for_non_lead_cpus(void)
-{
- unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int target_mpid, target_node;
-
- for_each_cpu(target_node) {
- target_mpid = tftf_get_mpidr_from_node(target_node);
- /* Skip lead CPU, as it is powered on */
- if (target_mpid == lead_mpid)
- continue;
-
- while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0)
- != PSCI_STATE_OFF)
- ;
- }
-}
-
-/*
* Update per-cpu counter corresponding to the current CPU.
* This function updates 2 counters, one in normal memory and the other
* in coherent device memory. The counts are then compared to check if they
diff --git a/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S b/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S
index 74fe4a60c..a88723465 100644
--- a/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S
+++ b/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -13,6 +13,7 @@
.globl sdei_entrypoint_resume
.globl sdei_handler_done
.globl sdei_rm_any_entrypoint
+ .globl sdei_check_pstate_entrypoint
.local event_handled
.comm event_handled, PLATFORM_CORE_COUNT * 4, 8
@@ -126,6 +127,33 @@ func sdei_rm_any_entrypoint
b .
endfunc sdei_rm_any_entrypoint
+func sdei_check_pstate_entrypoint
+	/*
+	 * Save the return address. x2 is stored only to keep the stack
+	 * 16-byte aligned; its value is not meaningful.
+	 */
+	stp x2, x30, [sp, #-16]!
+
+	/* Dispatch to C handler */
+	bl sdei_check_pstate_handler
+
+	/* Calculate address of event completion variable */
+	mrs x0, mpidr_el1
+	mov_imm x1, MPID_MASK
+	and x0, x0, x1
+	bl platform_get_core_pos
+	lsl x0, x0, #2
+	adrp x1, event_handled
+	add x1, x1, :lo12:event_handled
+	add x1, x0, x1
+
+	/* Mark event handling as complete so `sdei_handler_done` can return */
+	mov w2, #1
+	str w2, [x1]
+	sev
+
+	/*
+	 * Populate `x0` and `x1` to prepare for SMC call. x1 receives the
+	 * saved (dummy) x2 slot; x0 carries the SDEI completion function ID.
+	 */
+	ldp x1, x30, [sp], #16
+	mov_imm x0, SDEI_EVENT_COMPLETE_AND_RESUME
+	smc #0
+endfunc sdei_check_pstate_entrypoint
+
#else /* AARCH32 */
func sdei_entrypoint
/* SDEI is not supported on AArch32. */
diff --git a/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_pstate.c b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_pstate.c
new file mode 100644
index 000000000..339e4ba5b
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_pstate.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <sdei.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#ifdef __aarch64__
+
+#define EV_COOKIE 0xDEADBEEF
+
+extern sdei_handler_t sdei_check_pstate_entrypoint;
+
+u_register_t daif;
+u_register_t sp;
+u_register_t pan;
+u_register_t dit;
+
+/*
+ * SDEI event handler: snapshot the PSTATE-related registers on handler
+ * entry (DAIF, SPSel, and PAN/DIT where implemented) into file-scope
+ * globals so the signalling test can inspect the state the handler ran
+ * with after the event completes.
+ */
+int sdei_check_pstate_handler(int ev, unsigned long long arg)
+{
+	printf("%s: handler fired\n", __func__);
+	daif = read_daif();
+	sp = read_spsel();
+	if (is_armv8_1_pan_present())
+		pan = read_pan();
+
+	if (is_armv8_4_dit_present())
+		dit = read_dit();
+
+	/* The cookie passed at registration must reach the handler intact */
+	assert(arg == EV_COOKIE);
+	return 0;
+}
+
+/*
+ * Signal an SDEI event at the calling PE and, via the globals captured by
+ * sdei_check_pstate_handler(), verify the PSTATE seen on handler entry:
+ * all DAIF interrupts masked, SPSel selecting SP_ELx, and — when the
+ * features are implemented — the PAN and DIT bits following the
+ * SCTLR_EL2.SPAN / HCR_EL2.TGE rules.
+ */
+static test_result_t sdei_event_check_pstate(void)
+{
+	long long ret;
+
+	ret = sdei_event_register(0, sdei_check_pstate_entrypoint, EV_COOKIE,
+			SDEI_REGF_RM_PE, read_mpidr_el1());
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+				ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_enable(0);
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+		goto err0;
+	}
+
+	ret = sdei_pe_unmask();
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+		goto err1;
+	}
+
+	/* Check the common bits are set correctly */
+	ret = sdei_event_signal(read_mpidr_el1());
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI event signal failed: 0x%llx\n", ret);
+		goto err2;
+	}
+	sdei_handler_done();
+
+	/* DAIF D, A, I and F bits all set: 0x3c0 */
+	u_register_t all_interrupts_masked = 0x3c0;
+
+	if (daif != all_interrupts_masked) {
+		tftf_testcase_printf("Interrupts were not correctly masked " \
+				"during SDEI event signal\n" \
+				"Expected DAIF: 0x%lx, " \
+				"Actual DAIF: 0x%lx\n",
+				all_interrupts_masked, daif);
+		ret = -1;
+		goto err1;
+	}
+
+	/* SPSel == 1: the handler must run on SP_ELx, not SP_EL0 */
+	u_register_t use_sp_elx = 0x1;
+
+	if (sp != use_sp_elx) {
+		tftf_testcase_printf("The SPSel PSTATE Bit was not set " \
+				"correctly during SDEI event signal\n" \
+				"Expected SPSel: 0x%lx, " \
+				"Actual SPSel: 0x%lx\n",
+				use_sp_elx, sp);
+		ret = -1;
+		goto err1;
+	}
+
+	/*
+	 * NOTE(review): the error paths below (goto err1/err2) do not restore
+	 * SCTLR_EL2.SPAN / HCR_EL2.TGE — they are only restored on the
+	 * success path at the end of this section.
+	 */
+	if (is_armv8_1_pan_present()) {
+		printf("PAN Enabled so testing PAN PSTATE bit\n");
+
+		/* Test that the SPAN condition is met.
+		 * Unset the SPAN bit
+		 */
+		u_register_t old_sctlr = read_sctlr_el2();
+
+		write_sctlr_el2(old_sctlr & ~SCTLR_SPAN_BIT);
+
+		u_register_t old_hcr_el2 = read_hcr_el2();
+
+		/*
+		 * Check that when the SPAN bit is 0
+		 * the PAN PSTATE bit is maintained
+		 */
+
+		if ((old_hcr_el2 & HCR_TGE_BIT) == 0U) {
+			/*
+			 * Check that when the HCR_EL2.TGE != 1
+			 * the PAN bit is maintained
+			 */
+
+			/* When PAN bit is 0 */
+			u_register_t expected_pan = 0;
+			write_pan(expected_pan);
+
+			ret = sdei_event_signal(read_mpidr_el1());
+			if (ret < 0) {
+				tftf_testcase_printf("SDEI event signal failed: " \
+						"0x%llx\n", ret);
+				goto err2;
+			}
+			sdei_handler_done();
+			if (pan != expected_pan) {
+				tftf_testcase_printf("PAN PSTATE bit not maintained" \
+						"during SDEI event signal " \
+						"when the SPAN bit is unset and " \
+						"HCR_EL2.TGE != 1 \n" \
+						"Expected PAN: 0x%lx, " \
+						"Actual PAN: 0x%lx\n",
+						expected_pan, pan);
+				ret = -1;
+				goto err1;
+			}
+
+			/* When PAN Bit is 1 */
+			expected_pan = PAN_BIT;
+			write_pan(expected_pan);
+			ret = sdei_event_signal(read_mpidr_el1());
+			if (ret < 0) {
+				tftf_testcase_printf("SDEI event signal failed: " \
+						"0x%llx\n", ret);
+				goto err2;
+			}
+			sdei_handler_done();
+			if (pan != expected_pan) {
+				tftf_testcase_printf("PAN PSTATE bit not maintained" \
+						"during SDEI event signal " \
+						"when the SPAN bit is unset and " \
+						"HCR_EL2.TGE != 1 \n" \
+						"Expected PAN: 0x%lx, " \
+						"Actual PAN: 0x%lx\n",
+						expected_pan, pan);
+				ret = -1;
+				goto err1;
+			}
+
+		}
+
+		/*
+		 * Check that when the HCR_EL2.TGE = 1 and SPAN bit is unset,
+		 * PAN bit is forced to 1.
+		 * Set the TGE bit
+		 */
+
+		write_hcr_el2(old_hcr_el2 | HCR_TGE_BIT);
+
+		ret = sdei_event_signal(read_mpidr_el1());
+		if (ret < 0) {
+			tftf_testcase_printf("SDEI event signal failed: " \
+					"0x%llx\n", ret);
+			goto err2;
+		}
+		sdei_handler_done();
+		if (pan != PAN_BIT) {
+			tftf_testcase_printf("PAN PSTATE bit was not forced " \
+					"to 1 during SDEI event signal " \
+					"when the SPAN bit is unset and " \
+					"HCR_EL2.TGE = 1 \n");
+			ret = -1;
+			goto err1;
+		}
+
+		/*
+		 * Set the SCTLR and HCR_EL2 registers back to their old values
+		 */
+		write_sctlr_el2(old_sctlr);
+		write_hcr_el2(old_hcr_el2);
+	}
+
+	/* Check that the DIT PSTATE bit is maintained during event signal */
+	if (is_armv8_4_dit_present()) {
+		printf("DIT Enabled so testing DIT PSTATE bit\n");
+		/* When DIT bit is 0 */
+		u_register_t expected_dit = 0;
+
+		write_dit(expected_dit);
+		ret = sdei_event_signal(read_mpidr_el1());
+
+		if (ret < 0) {
+			tftf_testcase_printf("SDEI event signal failed: " \
+					"0x%llx\n", ret);
+			goto err2;
+		}
+		sdei_handler_done();
+		if (dit != expected_dit) {
+			tftf_testcase_printf("DIT PSTATE bit not maintained " \
+					"during SDEI event signal\n" \
+					"Expected DIT: 0x%lx, " \
+					"Actual DIT: 0x%lx\n",
+					expected_dit, dit);
+			ret = -1;
+			goto err1;
+		}
+
+		/* When dit bit is 1 */
+		expected_dit = DIT_BIT;
+		write_dit(expected_dit);
+		ret = sdei_event_signal(read_mpidr_el1());
+		if (ret < 0) {
+			tftf_testcase_printf("SDEI event signal failed: " \
+					"0x%llx\n", ret);
+			goto err2;
+		}
+		sdei_handler_done();
+		if (dit != expected_dit) {
+			tftf_testcase_printf("DIT PSTATE bit not maintained " \
+					"during SDEI event signal\n" \
+					"Expected DIT: 0x%lx, " \
+					"Actual DIT: 0x%lx\n",
+					expected_dit, dit);
+			ret = -1;
+			goto err1;
+		}
+	}
+
+/* Success falls through the full cleanup chain below with ret >= 0 */
+err2:
+	sdei_pe_mask();
+err1:
+	sdei_event_disable(0);
+err0:
+	sdei_event_unregister(0);
+
+	if (ret < 0)
+		return TEST_RESULT_FAIL;
+
+	return TEST_RESULT_SUCCESS;
+}
+#endif /* __aarch64__ */
+
+/*
+ * Entry point: the calling CPU signals itself using SDEI event signalling
+ * and checks the PSTATE observed in the handler. Requires SDEI v1.0;
+ * skipped on AArch32 and on other SDEI versions.
+ */
+test_result_t test_sdei_event_check_pstate(void)
+{
+	SKIP_TEST_IF_AARCH32();
+#ifdef __aarch64__
+	long long ret;
+
+	ret = sdei_version();
+	if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+		tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* IRQs are disabled for the duration of the PSTATE checks */
+	disable_irq();
+	/* We only need to run these tests on the main CPU */
+	if (sdei_event_check_pstate() != TEST_RESULT_SUCCESS) {
+		ret = -1;
+		goto err0;
+	}
+
+/* ret still holds the (positive) SDEI version on the success path */
+err0:
+	enable_irq();
+	if (ret < 0)
+		return TEST_RESULT_FAIL;
+	return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c b/tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c
index 64b8db78f..72a4ec5ab 100644
--- a/tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c
+++ b/tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -29,7 +29,6 @@ test_result_t test_trng_version(void)
return TEST_RESULT_SKIPPED;
}
-
if (version < TRNG_VERSION(1, 0)) {
return TEST_RESULT_FAIL;
}
@@ -51,8 +50,7 @@ test_result_t test_trng_features(void)
return TEST_RESULT_SKIPPED;
}
- if (!(tftf_trng_feature_implemented(SMC_TRNG_VERSION) &&
- tftf_trng_feature_implemented(SMC_TRNG_FEATURES) &&
+ if (!(tftf_trng_feature_implemented(SMC_TRNG_FEATURES) &&
tftf_trng_feature_implemented(SMC_TRNG_UUID) &&
tftf_trng_feature_implemented(SMC_TRNG_RND))) {
return TEST_RESULT_FAIL;
@@ -75,6 +73,11 @@ test_result_t test_trng_rnd(void)
return TEST_RESULT_SKIPPED;
}
+ /* Ensure function is implemented before requesting Entropy */
+ if (!(tftf_trng_feature_implemented(SMC_TRNG_RND))) {
+ return TEST_RESULT_FAIL;
+ }
+
/* Test invalid entropy sizes */
rnd_out = tftf_trng_rnd(U(0));
if (rnd_out.ret0 != TRNG_E_INVALID_PARAMS) {
@@ -97,7 +100,7 @@ test_result_t test_trng_rnd(void)
/* For N = 1, all returned entropy bits should be 0
* except the least significant bit */
rnd_out = tftf_trng_rnd(U(1));
- if (rnd_out.ret0 == TRNG_E_NO_ENTOPY) {
+ if (rnd_out.ret0 == TRNG_E_NO_ENTROPY) {
WARN("There is not a single bit of entropy\n");
return TEST_RESULT_SKIPPED;
}
@@ -116,7 +119,7 @@ test_result_t test_trng_rnd(void)
/* For N = MAX_BITS-1, the most significant bit should be 0 */
rnd_out = tftf_trng_rnd(TRNG_MAX_BITS - U(1));
- if (rnd_out.ret0 == TRNG_E_NO_ENTOPY) {
+ if (rnd_out.ret0 == TRNG_E_NO_ENTROPY) {
WARN("There is not a single bit of entropy\n");
return TEST_RESULT_SKIPPED;
}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_pstate_after_exception.c b/tftf/tests/runtime_services/trusted_os/tsp/test_pstate_after_exception.c
new file mode 100644
index 000000000..a17a54e1a
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_pstate_after_exception.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <test_helpers.h>
+
+/*
+ * Test that the PSTATE bits which are not set by AArch64.TakeException, but
+ * are set to a default value when taking an exception to EL3, are maintained
+ * after an exception, and that changes to PSTATE in the TSP do not affect
+ * the PSTATE in TFTF and vice versa.
+ */
+test_result_t tsp_check_pstate_maintained_on_exception(void)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values ret;
+	u_register_t dit;
+	u_register_t dit_bit;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+	SKIP_TEST_IF_DIT_NOT_SUPPORTED();
+
+	/* The DIT bit lives in PSTATE on AArch64 and in CPSR on AArch32 */
+#ifdef __aarch64__
+	dit_bit = DIT_BIT;
+#else
+	dit_bit = CPSR_DIT_BIT;
+#endif
+
+	write_dit(dit_bit);
+
+	/*
+	 * Standard SMC: ask the TSP to compare its DIT value against arg1.
+	 * NOTE(review): ret1 appears to be nonzero on a successful match and
+	 * 0 on mismatch, with ret2 == 0xffff signalling "DIT unsupported" —
+	 * confirm against the TSP's TSP_CHECK_DIT implementation.
+	 */
+	tsp_svc_params.fid = TSP_STD_FID(TSP_CHECK_DIT);
+	tsp_svc_params.arg1 = 0;
+	tsp_svc_params.arg2 = 0;
+	ret = tftf_smc(&tsp_svc_params);
+	if (ret.ret1 == 0) {
+		if (ret.ret2 == 0xffff) {
+			tftf_testcase_printf("DIT bit not supported by TSP");
+			return TEST_RESULT_SKIPPED;
+		}
+		tftf_testcase_printf("DIT bit in the TSP is not 0.\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* The round-trip through EL3/S-EL1 must not disturb TFTF's DIT */
+	dit = read_dit();
+	if (dit != dit_bit) {
+		tftf_testcase_printf("DIT bit in TFTF was not maintained.\n"
+				"Expected: 0x%x, Actual: 0x%x",
+				(uint32_t) dit_bit, (uint32_t) dit);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Second call: the TSP should now observe DIT == dit_bit */
+	tsp_svc_params.fid = TSP_STD_FID(TSP_CHECK_DIT);
+	tsp_svc_params.arg1 = dit_bit;
+	tsp_svc_params.arg2 = 0;
+	ret = tftf_smc(&tsp_svc_params);
+	if (ret.ret1 == 0) {
+		tftf_testcase_printf("DIT bit in the TSP was not maintained\n"
+				"Expected: 0x%x, Actual: 0x%x",
+				(uint32_t) dit_bit, (uint32_t) ret.ret2);
+		return TEST_RESULT_FAIL;
+	}
+
+	dit = read_dit();
+	if (dit != dit_bit) {
+		tftf_testcase_printf("DIT bit in TFTF was not maintained.\n"
+				"Expected: 0x%x, Actual: 0x%x",
+				(uint32_t) dit_bit, (uint32_t) dit);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/tbb-tests/tbb_test_infra.c b/tftf/tests/tbb-tests/tbb_test_infra.c
new file mode 100644
index 000000000..dc8ae385d
--- /dev/null
+++ b/tftf/tests/tbb-tests/tbb_test_infra.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "tbb_test_infra.h"
+
+#include <fwu_nvm.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <status.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/*
+ * Corrupt the boot FIP at the given NVM offset, then reboot. On the boot
+ * following the corruption, EL3 firmware is expected to repair the FIP;
+ * tftf_is_rebooted() distinguishes the pre-reboot (corrupting) pass from
+ * the post-reboot (verifying) pass. Returns FAIL only if the reset call
+ * itself falls through.
+ */
+test_result_t test_corrupt_boot_fip(unsigned int offset)
+{
+	unsigned int flag = 0xDEADBEEF;
+	size_t written = 0;
+	uintptr_t dev_handle;
+	int result;
+
+	if (tftf_is_rebooted()) {
+		/* FIP successfully repaired */
+		return TEST_RESULT_SUCCESS;
+	}
+
+	/* Corrupt the FIP at the provided offset */
+	plat_get_nvm_handle(&dev_handle);
+	result = io_seek(dev_handle, IO_SEEK_SET, offset);
+	TEST_ASSERT(result == IO_SUCCESS);
+	result = io_write(dev_handle, (uintptr_t) &flag, sizeof(flag), &written);
+	TEST_ASSERT(result == IO_SUCCESS);
+	TEST_ASSERT(written == sizeof(flag));
+
+	/*
+	 * Now reboot the system.
+	 * On the next boot, EL3 firmware should notice and repair the corruption
+	 * before re-entering TFTF
+	 */
+
+	tftf_notify_reboot();
+	psci_system_reset();
+	/* Unreachable unless the reset request failed */
+	return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/tbb-tests/tbb_test_infra.h b/tftf/tests/tbb-tests/tbb_test_infra.h
new file mode 100644
index 000000000..e6bf0e598
--- /dev/null
+++ b/tftf/tests/tbb-tests/tbb_test_infra.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TBB_TEST_INFRA_H_INCLUDED
+#define TBB_TEST_INFRA_H_INCLUDED
+
+#include <tftf_lib.h>
+
+test_result_t test_corrupt_boot_fip(unsigned int offset);
+
+#endif /* TBB_TEST_INFRA_H_INCLUDED */
+
diff --git a/tftf/tests/tbb-tests/test_tbb_corrupt_fip.c b/tftf/tests/tbb-tests/test_tbb_corrupt_fip.c
new file mode 100644
index 000000000..135efeea0
--- /dev/null
+++ b/tftf/tests/tbb-tests/test_tbb_corrupt_fip.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <uuid.h>
+
+#include "tbb_test_infra.h"
+
+#include <firmware_image_package.h>
+#include <platform.h>
+#include <tftf_lib.h>
+#include <uuid_utils.h>
+
+/*
+ * Return the offset relative to the base of the FIP of
+ * the image described by the uuid. 0 is returned on failure.
+ * The first image will not have an offset of 0, as the header
+ * exists at offset 0.
+ *
+ * Walks the ToC entries in-place at PLAT_ARM_FIP_BASE until the
+ * null-UUID terminator entry is reached.
+ */
+static unsigned int
+find_offset_in_fip(const uuid_t *uuid)
+{
+	fip_toc_entry_t *current_file =
+		(fip_toc_entry_t *) (PLAT_ARM_FIP_BASE + sizeof(fip_toc_header_t));
+
+	/* The ToC is terminated by an entry whose UUID is all-zero */
+	while (!is_uuid_null(&(current_file->uuid))) {
+		if (uuid_equal(&(current_file->uuid), uuid)) {
+			return current_file->offset_address;
+		}
+		current_file += 1;
+	};
+	return 0;
+}
+
+/*
+ * Locate the Trusted Key certificate inside the FIP and corrupt the FIP at
+ * that offset via the common corrupt-and-reboot helper. Skipped when the
+ * certificate is not present in the FIP.
+ */
+test_result_t test_tbb_tkey_cert_header(void)
+{
+	static const uuid_t tkey_cert_uuid = UUID_TRUSTED_KEY_CERT;
+	unsigned int image_offset = find_offset_in_fip(&tkey_cert_uuid);
+
+	TEST_ASSERT_SKIP(image_offset != 0);
+	return test_corrupt_boot_fip(image_offset);
+}
+
diff --git a/tftf/tests/tests-corrupt-fip.mk b/tftf/tests/tests-corrupt-fip.mk
new file mode 100644
index 000000000..22fa686ed
--- /dev/null
+++ b/tftf/tests/tests-corrupt-fip.mk
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/tbb-tests/, \
+ test_tbb_corrupt_fip.c \
+ tbb_test_infra.c \
+)
+
+TESTS_SOURCES += plat/common/fwu_nvm_accessors.c \
+ plat/arm/common/arm_fwu_io_storage.c \
+ drivers/io/io_fip.c \
+ drivers/io/io_memmap.c
diff --git a/tftf/tests/tests-corrupt-fip.xml b/tftf/tests/tests-corrupt-fip.xml
new file mode 100644
index 000000000..6bfa4a48a
--- /dev/null
+++ b/tftf/tests/tests-corrupt-fip.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="tbb corrupt trusted key header" description="The FIP is corrupted before update">
+ <testcase name="tbb bad tkey cert header" function="test_tbb_tkey_cert_header" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-cpu-extensions.mk b/tftf/tests/tests-cpu-extensions.mk
index fedf7837d..b0af1a3af 100644
--- a/tftf/tests/tests-cpu-extensions.mk
+++ b/tftf/tests/tests-cpu-extensions.mk
@@ -1,18 +1,29 @@
#
-# Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2024, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
TESTS_SOURCES += $(addprefix tftf/tests/, \
+ extensions/afp/test_afp.c \
extensions/amu/test_amu.c \
+ extensions/brbe/test_brbe.c \
+ extensions/ecv/test_ecv.c \
+ extensions/fgt/test_fgt.c \
+ extensions/pmuv3/test_pmuv3.c \
+ extensions/mpam/test_mpam.c \
extensions/mte/test_mte.c \
- extensions/sve/sve_operations.S \
+ extensions/pauth/test_pauth.c \
+ extensions/sme/test_sme.c \
+ extensions/sme/test_sme2.c \
+ extensions/spe/test_spe.c \
extensions/sve/test_sve.c \
- extensions/fgt/test_fgt.c \
- extensions/ecv/test_ecv.c \
+ extensions/sys_reg_trace/test_sys_reg_trace.c \
+ extensions/trbe/test_trbe.c \
+ extensions/trf/test_trf.c \
+ extensions/wfxt/test_wfxt.c \
+ runtime_services/arm_arch_svc/smccc_arch_soc_id.c \
runtime_services/arm_arch_svc/smccc_arch_workaround_1.c \
runtime_services/arm_arch_svc/smccc_arch_workaround_2.c \
- runtime_services/arm_arch_svc/smccc_arch_soc_id.c \
- extensions/pauth/test_pauth.c \
+ runtime_services/arm_arch_svc/smccc_arch_workaround_3.c \
)
diff --git a/tftf/tests/tests-cpu-extensions.xml b/tftf/tests/tests-cpu-extensions.xml
index 08a65c7fc..3b9334468 100644
--- a/tftf/tests/tests-cpu-extensions.xml
+++ b/tftf/tests/tests-cpu-extensions.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ Copyright (c) 2018-2024, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
@@ -16,15 +16,30 @@
<testcase name="Use Pointer Authentication Instructions" function="test_pauth_instructions" />
<testcase name="Check for Pointer Authentication key leakage from EL3" function="test_pauth_leakage" />
<testcase name="Check for Pointer Authentication key leakage from TSP" function="test_pauth_leakage_tsp" />
+ <testcase name="Access MPAM registers" function="test_mpam_reg_access" />
<testcase name="Use MTE Instructions" function="test_mte_instructions" />
<testcase name="Check for MTE register leakage" function="test_mte_leakage" />
<testcase name="Use FGT Registers" function="test_fgt_enabled" />
<testcase name="Use ECV Registers" function="test_ecv_enabled" />
+ <testcase name="Use trace buffer control Registers" function="test_trbe_enabled" />
+ <testcase name="Use branch record buffer control registers" function="test_brbe_enabled" />
+ <testcase name="Use trace filter control Registers" function="test_trf_enabled" />
+ <testcase name="Use trace system Registers" function="test_sys_reg_trace_enabled" />
+ <testcase name="SME support" function="test_sme_support" />
+ <testcase name="SME2 support" function="test_sme2_support" />
+ <testcase name="SPE support" function="test_spe_support" />
+ <testcase name="AFP support" function="test_afp_support" />
+ <testcase name="Test wfit instruction" function="test_wfit_instruction" />
+ <testcase name="Test wfet instruction" function="test_wfet_instruction" />
+ <testcase name="PMUv3 cycle counter functional in NS" function="test_pmuv3_cycle_works_ns" />
+ <testcase name="PMUv3 event counter functional in NS" function="test_pmuv3_event_works_ns" />
+ <testcase name="PMUv3 SMC counter preservation" function="test_pmuv3_el3_preserves" />
</testsuite>
<testsuite name="ARM_ARCH_SVC" description="Arm Architecture Service tests">
<testcase name="SMCCC_ARCH_WORKAROUND_1 test" function="test_smccc_arch_workaround_1" />
<testcase name="SMCCC_ARCH_WORKAROUND_2 test" function="test_smccc_arch_workaround_2" />
+ <testcase name="SMCCC_ARCH_WORKAROUND_3 test" function="test_smccc_arch_workaround_3" />
<testcase name="SMCCC_ARCH_SOC_ID test" function="test_smccc_arch_soc_id" />
</testsuite>
diff --git a/tftf/tests/tests-ea-ffh.mk b/tftf/tests/tests-ea-ffh.mk
new file mode 100644
index 000000000..be0eb65c0
--- /dev/null
+++ b/tftf/tests/tests-ea-ffh.mk
@@ -0,0 +1,7 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += tftf/tests/misc_tests/test_ea_ffh.c
diff --git a/tftf/tests/tests-ea-ffh.xml b/tftf/tests/tests-ea-ffh.xml
new file mode 100644
index 000000000..1d31b8cff
--- /dev/null
+++ b/tftf/tests/tests-ea-ffh.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Inject External aborts" description="Injected EA's gets handled in EL3">
+ <testcase name="Inject syncEA which gets handled in EL3" function="test_inject_syncEA" />
+ <testcase name="Inject Serror which gets handled in EL3" function="test_inject_serror" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-errata_abi.mk b/tftf/tests/tests-errata_abi.mk
new file mode 100644
index 000000000..410dc13fe
--- /dev/null
+++ b/tftf/tests/tests-errata_abi.mk
@@ -0,0 +1,7 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += tftf/tests/runtime_services/standard_service/errata_abi/api_tests/test_errata_abi_functionality.c
diff --git a/tftf/tests/tests-errata_abi.xml b/tftf/tests/tests-errata_abi.xml
new file mode 100644
index 000000000..d1964757e
--- /dev/null
+++ b/tftf/tests/tests-errata_abi.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="EM-ABI" description="Errata ABI Feature Implementation">
+ <testcase name="Version" function="test_em_version" />
+ <testcase name="EM_cpu_features" function="test_errata_abi_features" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-extensive.mk b/tftf/tests/tests-extensive.mk
index d1c6c5e08..5e0f0068b 100644
--- a/tftf/tests/tests-extensive.mk
+++ b/tftf/tests/tests-extensive.mk
@@ -6,6 +6,8 @@
# Run all standard tests, plus the extensive ones.
include tftf/tests/tests-standard.mk
-TESTS_MAKEFILE += tftf/tests/tests-psci-extensive.mk
+
+TESTS_MAKEFILE += tftf/tests/tests-psci-extensive.mk \
+ tftf/tests/tests-timer-stress.mk
include ${TESTS_MAKEFILE}
diff --git a/tftf/tests/tests-extensive.xml b/tftf/tests/tests-extensive.xml
index 773c19e0f..e861b4866 100644
--- a/tftf/tests/tests-extensive.xml
+++ b/tftf/tests/tests-extensive.xml
@@ -10,36 +10,12 @@
<!-- External references to all individual tests files. -->
<!DOCTYPE testsuites [
<!ENTITY tests-psci-extensive SYSTEM "tests-psci-extensive.xml">
-
- <!ENTITY tests-tftf-validation SYSTEM "tests-tftf-validation.xml">
- <!ENTITY tests-boot-req SYSTEM "tests-boot-req.xml">
- <!ENTITY tests-psci SYSTEM "tests-psci.xml">
- <!ENTITY tests-sdei SYSTEM "tests-sdei.xml">
- <!ENTITY tests-rt-instr SYSTEM "tests-runtime-instrumentation.xml">
- <!ENTITY tests-tsp SYSTEM "tests-tsp.xml">
- <!ENTITY tests-el3-pstate SYSTEM "tests-el3-power-state.xml">
- <!ENTITY tests-state-switch SYSTEM "tests-arm-state-switch.xml">
- <!ENTITY tests-cpu-extensions SYSTEM "tests-cpu-extensions.xml">
- <!ENTITY tests-performance SYSTEM "tests-performance.xml">
- <!ENTITY tests-smc SYSTEM "tests-smc.xml">
- <!ENTITY tests-pmu-leakage SYSTEM "tests-pmu-leakage.xml">
+ <!ENTITY tests-timer-stress SYSTEM "tests-timer-stress.xml">
]>
<testsuites>
&tests-psci-extensive;
-
- &tests-tftf-validation;
- &tests-boot-req;
- &tests-psci;
- &tests-sdei;
- &tests-rt-instr;
- &tests-tsp;
- &tests-el3-pstate;
- &tests-state-switch;
- &tests-cpu-extensions;
- &tests-performance;
- &tests-smc;
- &tests-pmu-leakage;
+ &tests-timer-stress;
</testsuites>
diff --git a/tftf/tests/tests-firmware-handoff.mk b/tftf/tests/tests-firmware-handoff.mk
new file mode 100644
index 000000000..515188afa
--- /dev/null
+++ b/tftf/tests/tests-firmware-handoff.mk
@@ -0,0 +1,13 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${TRANSFER_LIST}, 1)
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ test_firmware_handoff.c \
+)
+
+endif
diff --git a/tftf/tests/tests-firmware-handoff.xml b/tftf/tests/tests-firmware-handoff.xml
new file mode 100644
index 000000000..4b4b2a468
--- /dev/null
+++ b/tftf/tests/tests-firmware-handoff.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Firmware Handoff" description="Validate transfer list managed by firmware handoff framework">
+ <testcase name="Validate transfer list header" function="test_handoff_header" />
+ <testcase name="Validate HW_CONFIG in transfer list" function="test_handoff_dtb_payload" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-hcx.mk b/tftf/tests/tests-hcx.mk
new file mode 100644
index 000000000..ba7cd78f0
--- /dev/null
+++ b/tftf/tests/tests-hcx.mk
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2021, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/, \
+ extensions/hcx/test_hcx.c \
+)
diff --git a/tftf/tests/tests-hcx.xml b/tftf/tests/tests-hcx.xml
new file mode 100644
index 000000000..5b7f947f3
--- /dev/null
+++ b/tftf/tests/tests-hcx.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2021, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="FEAT_HCX" description="Tests that HCRX_EL2 access has been granted by EL3.">
+ <testcase name="Test access to HCRX_EL2" function="test_feat_hcx_enabled" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-memory-access.mk b/tftf/tests/tests-memory-access.mk
new file mode 100644
index 000000000..13b22416f
--- /dev/null
+++ b/tftf/tests/tests-memory-access.mk
@@ -0,0 +1,30 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TFTF_INCLUDES += \
+ -Iinclude/runtime_services/host_realm_managment
+
+TESTS_SOURCES += tftf/tests/misc_tests/test_invalid_access.c
+
+ifeq (${ARCH},aarch64)
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/host_realm_managment/, \
+ host_realm_rmi.c \
+ host_realm_helper.c \
+ )
+
+endif
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ ${ARCH}/ffa_arch_helpers.S \
+ ffa_helpers.c \
+ spm_common.c \
+ test_ffa_memory_sharing.c \
+ test_ffa_setup_and_discovery.c \
+ spm_test_helpers.c \
+)
diff --git a/tftf/tests/tests-memory-access.xml b/tftf/tests/tests-memory-access.xml
new file mode 100644
index 000000000..4318cc920
--- /dev/null
+++ b/tftf/tests/tests-memory-access.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2024, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="FF-A Memory Sharing (RME)"
+ description="Test FF-A Memory Sharing ABIs" >
+ <testcase name="Lend Memory to Secure World"
+ function="test_mem_lend_sp" />
+ <testcase name="Lend memory, clear flag set"
+ function="test_mem_share_to_sp_clear_memory"/>
+ <testcase name="Share Memory with Secure World"
+ function="test_mem_share_sp" />
+ <testcase name="Donate Memory to Secure World"
+ function="test_mem_donate_sp"/>
+ <testcase name="Request Share Memory SP-to-SP"
+ function="test_req_mem_share_sp_to_sp" />
+ <testcase name="Request Lend Memory SP-to-SP"
+ function="test_req_mem_lend_sp_to_sp" />
+ <testcase name="Request Donate Memory SP-to-SP"
+ function="test_req_mem_donate_sp_to_sp" />
+ <testcase name="Request Share NS Memory (large PA) SP-to-SP"
+ function="test_req_ns_mem_share_sp_to_sp" />
+ <testcase name="Request Share Memory SP-to-VM"
+ function="test_req_mem_share_sp_to_vm" />
+ <testcase name="Request Lend Memory SP-to-VM"
+ function="test_req_mem_lend_sp_to_vm" />
+ <testcase name="Share forbidden memory with SP"
+ function="test_share_forbidden_ranges" />
+ <testcase name="Donate consecutively"
+ function="test_consecutive_donate" />
+ </testsuite>
+
+ <testsuite name="Invalid memory access" description="Invalid memory access">
+ <testcase name="Access EL3 memory from NS world"
+ function="el3_memory_cannot_be_accessed_in_ns" />
+ <testcase name="Access Secure memory from NS world"
+ function="s_memory_cannot_be_accessed_in_ns" />
+ </testsuite>
+
+ <testsuite name="Invalid memory access with RME extension"
+ description="Invalid memory access with RME extension">
+ <testcase name="Access Realm memory from NS world"
+ function="rl_memory_cannot_be_accessed_in_ns" />
+ <testcase name="Access Secure memory from Realm world"
+ function="s_memory_cannot_be_accessed_in_rl" />
+ <testcase name="Access Root memory from Realm world"
+ function="rt_memory_cannot_be_accessed_in_rl" />
+ <testcase name="Share memory to an SP from a Root region"
+ function="rt_memory_cannot_be_accessed_in_s" />
+ <testcase name="FF-A memory share fails if using realm memory"
+ function="test_ffa_mem_send_sp_realm_memory" />
+ <testcase name="FF-A memory share fail realm memory other constituent"
+ function="test_ffa_mem_lend_sp_realm_memory_separate_constituent" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-nop.mk b/tftf/tests/tests-nop.mk
new file mode 100644
index 000000000..0cad4828f
--- /dev/null
+++ b/tftf/tests/tests-nop.mk
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ test_nop.c \
+ )
diff --git a/tftf/tests/tests-nop.xml b/tftf/tests/tests-nop.xml
new file mode 100644
index 000000000..021d0a656
--- /dev/null
+++ b/tftf/tests/tests-nop.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="NOP tests" description="Dummy tests">
+ <testcase name="Test which calls a NOP function"
+ function="test_nop" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-psci.xml b/tftf/tests/tests-psci.xml
index e2be55724..e9c612bd8 100644
--- a/tftf/tests/tests-psci.xml
+++ b/tftf/tests/tests-psci.xml
@@ -51,6 +51,18 @@
<testcase name="CPU suspend to standby at level 3" function="test_psci_suspend_standby_level3" />
</testsuite>
+ <testsuite name="PSCI CPU Suspend in OSI mode" description="Test PSCI CPU Suspend support in OSI mode">
+ <testcase name="CPU suspend to powerdown at level 0 in OSI mode" function="test_psci_suspend_powerdown_level0_osi" />
+ <testcase name="CPU suspend to powerdown at level 1 in OSI mode" function="test_psci_suspend_powerdown_level1_osi" />
+ <testcase name="CPU suspend to powerdown at level 2 in OSI mode" function="test_psci_suspend_powerdown_level2_osi" />
+ <testcase name="CPU suspend to powerdown at level 3 in OSI mode" function="test_psci_suspend_powerdown_level3_osi" />
+
+ <testcase name="CPU suspend to standby at level 0 in OSI mode" function="test_psci_suspend_standby_level0_osi" />
+ <testcase name="CPU suspend to standby at level 1 in OSI mode" function="test_psci_suspend_standby_level1_osi" />
+ <testcase name="CPU suspend to standby at level 2 in OSI mode" function="test_psci_suspend_standby_level2_osi" />
+ <testcase name="CPU suspend to standby at level 3 in OSI mode" function="test_psci_suspend_standby_level3_osi" />
+ </testsuite>
+
<testsuite name="PSCI STAT" description="Test PSCI STAT support Core level">
<testcase name="for valid composite state CPU suspend" function="test_psci_stat_all_power_states" />
<testcase name="Stats test cases for CPU OFF" function="test_psci_stats_cpu_off" />
diff --git a/tftf/tests/tests-quark.mk b/tftf/tests/tests-quark.mk
deleted file mode 100644
index 0504936b0..000000000
--- a/tftf/tests/tests-quark.mk
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Copyright (c) 2019, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-TESTS_SOURCES += \
- tftf/tests/runtime_services/secure_service/spci_helpers.c \
- tftf/tests/runtime_services/secure_service/test_quark_request.c
diff --git a/tftf/tests/tests-quark.xml b/tftf/tests/tests-quark.xml
deleted file mode 100644
index 109fa229f..000000000
--- a/tftf/tests/tests-quark.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-
-<!--
- Copyright (c) 2019, Arm Limited. All rights reserved.
-
- SPDX-License-Identifier: BSD-3-Clause
--->
-
-<testsuites>
-
- <testsuite name="Secure Partition Quark tests"
- description="Test related to the Secure Partition Quark">
-
- <testcase name="Send simple request to Quark"
- function="test_quark_request" />
-
- </testsuite>
-
-</testsuites>
diff --git a/tftf/tests/tests-ras-ffh-nested.mk b/tftf/tests/tests-ras-ffh-nested.mk
new file mode 100644
index 000000000..1adcf8043
--- /dev/null
+++ b/tftf/tests/tests-ras-ffh-nested.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ inject_ras_error.S \
+ test_ras_ffh_nested.c \
+)
diff --git a/tftf/tests/tests-ras-ffh-nested.xml b/tftf/tests/tests-ras-ffh-nested.xml
new file mode 100644
index 000000000..8dfb6933b
--- /dev/null
+++ b/tftf/tests/tests-ras-ffh-nested.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="RAS FFH nested" description="RAS errors handled in EL3 as nested exception on top of SMC call">
+ <testcase name="Inject RAS error which gets handled as nested exception during SMC exception" function="test_ras_ffh_nested" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-ras-kfh-reflect.mk b/tftf/tests/tests-ras-kfh-reflect.mk
new file mode 100644
index 000000000..bc8852fe0
--- /dev/null
+++ b/tftf/tests/tests-ras-kfh-reflect.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ inject_ras_error.S \
+ test_ras_kfh_reflect.c \
+)
diff --git a/tftf/tests/tests-ras-kfh-reflect.xml b/tftf/tests/tests-ras-kfh-reflect.xml
new file mode 100644
index 000000000..4150200b1
--- /dev/null
+++ b/tftf/tests/tests-ras-kfh-reflect.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="RAS KFH Reflection" description="RAS errors reflected back from EL3">
+ <testcase name="Inject RAS error which gets reflected back during IRQ handling" function="test_ras_kfh_reflect_irq" />
+ <testcase name="Inject RAS error which gets reflected back during SMC call" function="test_ras_kfh_reflect_sync" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-ras-kfh.mk b/tftf/tests/tests-ras-kfh.mk
new file mode 100644
index 000000000..e79db93cb
--- /dev/null
+++ b/tftf/tests/tests-ras-kfh.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ inject_ras_error.S \
+ test_ras_kfh.c \
+)
diff --git a/tftf/tests/tests-ras-kfh.xml b/tftf/tests/tests-ras-kfh.xml
new file mode 100644
index 000000000..3cfed499e
--- /dev/null
+++ b/tftf/tests/tests-ras-kfh.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="RAS KFH" description="RAS errors handled through Kernel First handling">
+ <testcase name="Inject RAS error and wait for it being handled" function="test_ras_kfh" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-realm-payload.mk b/tftf/tests/tests-realm-payload.mk
new file mode 100644
index 000000000..a6d4d47a2
--- /dev/null
+++ b/tftf/tests/tests-realm-payload.mk
@@ -0,0 +1,45 @@
+#
+# Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ARCH},aarch64)
+TFTF_INCLUDES += \
+ -Iinclude/runtime_services/host_realm_managment
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/realm_payload/, \
+ host_realm_payload_multiple_rec_tests.c \
+ host_realm_payload_tests.c \
+ host_realm_spm.c \
+ host_realm_payload_simd_tests.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/host_realm_managment/, \
+ host_pmuv3.c \
+ host_realm_rmi.c \
+ host_realm_helper.c \
+ host_shared_data.c \
+ rmi_delegate_tests.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ ${ARCH}/ffa_arch_helpers.S \
+ ffa_helpers.c \
+ spm_common.c \
+ spm_test_helpers.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix lib/heap/, \
+ page_alloc.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix lib/extensions/fpu/, \
+ fpu.c \
+ )
+endif
diff --git a/tftf/tests/tests-realm-payload.xml b/tftf/tests/tests-realm-payload.xml
new file mode 100644
index 000000000..0ecefee14
--- /dev/null
+++ b/tftf/tests/tests-realm-payload.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Realm payload at EL1" description="Test Realm EL1 framework capabilities" >
+ <testcase name="Realm EL1 creation and execution test"
+ function="host_test_realm_create_enter" />
+ <testcase name="Multiple Realm EL1 creation and execution test"
+ function="host_test_multiple_realm_create_enter" />
+ <testcase name="Realm payload multi rec multiple cpu"
+ function="host_realm_multi_rec_multiple_cpu" />
+ <testcase name="Realm payload multi rec validations"
+ function="host_realm_multi_rec_multiple_cpu2" />
+ <testcase name="Realm SEA Empty"
+ function="host_realm_sea_empty" />
+ <testcase name="Realm SEA Unprotected"
+ function="host_realm_sea_unprotected" />
+ <testcase name="Realm Abort Unassigned RAM"
+ function="host_realm_abort_unassigned_ram" />
+ <testcase name="Realm Abort Unassigned Destroyed"
+ function="host_realm_abort_unassigned_destroyed" />
+ <testcase name="Realm Abort Assigned destroyed"
+ function="host_realm_abort_assigned_destroyed" />
+ <testcase name="Realm payload multi rec single cpu"
+ function="host_realm_multi_rec_single_cpu" />
+ <testcase name="Realm payload multi rec psci denied"
+ function="host_realm_multi_rec_psci_denied" />
+ <testcase name="Realm payload multi rec force exit on NS IRQ"
+ function="host_realm_multi_rec_exit_irq" />
+ <testcase name="Realm EL1 creation and RSI version"
+ function="host_test_realm_rsi_version" />
+ <testcase name="Realm payload boot"
+ function="host_realm_version_single_cpu" />
+ <testcase name="Realm payload multi CPU request"
+ function="host_realm_version_multi_cpu" />
+ <testcase name="Realm payload Delegate and Undelegate"
+ function="host_realm_delegate_undelegate" />
+ <testcase name="Multi CPU Realm payload Delegate and Undelegate"
+ function="host_realm_delundel_multi_cpu" />
+ <testcase name="Testing delegation fails"
+ function="host_realm_fail_del" />
+ <testcase name="PMUv3 cycle counter functional in Realm"
+ function="host_realm_pmuv3_cycle_works" />
+ <testcase name="PMUv3 event counter functional in Realm"
+ function="host_realm_pmuv3_event_works" />
+ <testcase name="PMUv3 RSI SMC counter preservation"
+ function="host_realm_pmuv3_rmm_preserves" />
+ <testcase name="PMUv3 overflow interrupt"
+ function="host_realm_pmuv3_overflow_interrupt" />
+ <testcase name="Test Secure interrupt can preempt Realm EL1"
+ function="host_realm_sec_interrupt_can_preempt_rl" />
+ <testcase name="Check that FPU state registers context is preserved in RL/SE/NS"
+ function="host_realm_fpu_access_in_rl_ns_se" />
+ <testcase name="Realm request set_ripas"
+ function="host_realm_set_ripas" />
+ <testcase name="Realm reject set_ripas"
+ function="host_realm_reject_set_ripas" />
+ <!-- Test case related to SVE support and SIMD state -->
+ <testcase name="Check RMI reports proper SVE VL"
+ function="host_check_rmi_reports_proper_sve_vl" />
+ <testcase name="Create SVE Realm with invalid VL"
+ function="host_sve_realm_test_invalid_vl" />
+ <testcase name="Create SVE Realm and test ID registers"
+ function="host_sve_realm_cmd_id_registers" />
+ <testcase name="Create non SVE Realm and test ID registers"
+ function="host_non_sve_realm_cmd_id_registers" />
+ <testcase name="Create SVE Realm and check rdvl result"
+ function="host_sve_realm_cmd_rdvl" />
+ <testcase name="Create SVE Realm and probe all supported VLs"
+ function="host_sve_realm_cmd_probe_vl" />
+ <testcase name="Check whether RMM preserves NS ZCR_EL2 register"
+ function="host_sve_realm_check_config_register" />
+ <testcase name="Intermittently switch to Realm while doing NS SVE ops"
+ function="host_sve_realm_check_vectors_operations" />
+ <testcase name="Check if RMM does not leak Realm SVE vector registers"
+ function="host_sve_realm_check_vectors_leaked" />
+ <testcase name="Check if Realm gets undefined abort if it accesses SVE"
+ function="host_non_sve_realm_check_undef_abort" />
+ <testcase name="Check various SIMD state preserved across NS/RL switch"
+ function="host_and_realm_check_simd" />
+ <!-- Test Realm for SME -->
+ <testcase name="Create Realm and test SME ID registers"
+ function="host_realm_check_sme_id_registers" />
+ <testcase name="Check if Realm gets undefined abort when it accesses SME"
+ function="host_realm_check_sme_undef_abort" />
+ <testcase name="Check whether RMM preserves NS SME configurations"
+ function="host_realm_check_sme_configs" />
+ <testcase name="Intermittently switch to Realm while NS doing SSVE ops"
+ function="host_sve_realm_check_streaming_vectors_operations" />
+ <!-- Test case related to PAuth -->
+ <testcase name="Check if PAuth keys are preserved in RL/SE/NS"
+ function="host_realm_enable_pauth" />
+ <testcase name="Generate PAuth Fault by overwriting LR"
+ function="host_realm_pauth_fault" />
+ <testcase name="Check if DIT Bit is preserved in RL/NS"
+ function="host_realm_enable_dit" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-rmi-spm.mk b/tftf/tests/tests-rmi-spm.mk
new file mode 100644
index 000000000..735e1911f
--- /dev/null
+++ b/tftf/tests/tests-rmi-spm.mk
@@ -0,0 +1,31 @@
+#
+# Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ARCH},aarch64)
+
+TFTF_INCLUDES += \
+ -Iinclude/runtime_services/host_realm_managment
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/host_realm_managment/, \
+ host_realm_rmi.c \
+ host_realm_helper.c \
+ rmi_spm_tests.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ ${ARCH}/ffa_arch_helpers.S \
+ ffa_helpers.c \
+ spm_common.c \
+ spm_test_helpers.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix lib/heap/, \
+ page_alloc.c \
+ )
+endif
diff --git a/tftf/tests/tests-rmi-spm.xml b/tftf/tests/tests-rmi-spm.xml
new file mode 100644
index 000000000..1d12b4a35
--- /dev/null
+++ b/tftf/tests/tests-rmi-spm.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="RMI and SPM tests" description="Tests for SPM and RMI delegate/undelegate">
+ <testcase name="Test TFTF can call RMM/TRP and SPM serially on a single core"
+ function="test_spm_rmm_serial_smc" />
+ <testcase name="Test TFTF can call RMM/TRP and SPM parallelly on a multi core"
+ function="test_spm_rmm_parallel_smc" />
+ </testsuite>
+
+</testsuites> \ No newline at end of file
diff --git a/tftf/tests/tests-rng_trap.mk b/tftf/tests/tests-rng_trap.mk
new file mode 100644
index 000000000..2457b0cbd
--- /dev/null
+++ b/tftf/tests/tests-rng_trap.mk
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/, \
+ extensions/rng_trap/test_rng_trap.c \
+)
diff --git a/tftf/tests/tests-rng_trap.xml b/tftf/tests/tests-rng_trap.xml
new file mode 100644
index 000000000..1f8cb9055
--- /dev/null
+++ b/tftf/tests/tests-rng_trap.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2022, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="RNG_TRAP" description="Tests that RNDR/RNDRRS instructions trap to EL3 and return a random number">
+ <testcase name="Test if RNDR instruction traps to el3 and gets a random number" function="test_rndr_rng_trap" />
+ <testcase name="Test if RNDRRS instruction traps to el3 and gets a random number" function="test_rndrrs_rng_trap" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-sdei.mk b/tftf/tests/tests-sdei.mk
index 0c495d99f..e73bfb72b 100644
--- a/tftf/tests/tests-sdei.mk
+++ b/tftf/tests/tests-sdei.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2020, Arm Limited. All rights reserved.
+# Copyright (c) 2020-2021, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -10,4 +10,5 @@ TESTS_SOURCES += \
test_sdei.c \
test_sdei_state.c \
test_sdei_rm_any.c \
+ test_sdei_pstate.c \
)
diff --git a/tftf/tests/tests-sdei.xml b/tftf/tests/tests-sdei.xml
index 147835bc5..38c7c0d0e 100644
--- a/tftf/tests/tests-sdei.xml
+++ b/tftf/tests/tests-sdei.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2020, Arm Limited. All rights reserved.
+ Copyright (c) 2020-2021, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
@@ -15,6 +15,7 @@
<testcase name="SDEI event signaling: each core signals itself" function="test_sdei_event_signal_serial" />
<testcase name="SDEI event signaling: one core signals all others" function="test_sdei_event_signal_all" />
<testcase name="SDEI event routing all: SPI events routed to all CPUs" function="test_sdei_routing_any" />
+ <testcase name="SDEI event handler pstate testing" function="test_sdei_event_check_pstate" />
</testsuite>
</testsuites>
diff --git a/tftf/tests/tests-single-fault.mk b/tftf/tests/tests-single-fault.mk
index 064186516..86a23c380 100644
--- a/tftf/tests/tests-single-fault.mk
+++ b/tftf/tests/tests-single-fault.mk
@@ -1,10 +1,10 @@
#
-# Copyright (c) 2018, Arm Limited. All rights reserved.
+# Copyright (c) 2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
- inject_serror.S \
+ inject_ras_error.S \
test_single_fault.c \
)
diff --git a/tftf/tests/tests-smcfuzzing.mk b/tftf/tests/tests-smcfuzzing.mk
index 82b6a7c85..2834e4ec0 100644
--- a/tftf/tests/tests-smcfuzzing.mk
+++ b/tftf/tests/tests-smcfuzzing.mk
@@ -1,12 +1,50 @@
#
-# Copyright (c) 2020, Arm Limited. All rights reserved.
+# Copyright (c) 2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+# Generate random fuzzing seeds
+# If no instance count is provided, default to 1 instance
+# If no seeds are provided, generate them randomly
+# The number of seeds provided must match the instance count
+SMC_FUZZ_INSTANCE_COUNT ?= 1
+SMC_FUZZ_SEEDS ?= $(shell python -c "from random import randint; seeds = [randint(0, 4294967295) for i in range($(SMC_FUZZ_INSTANCE_COUNT))];print(\",\".join(str(x) for x in seeds));")
+SMC_FUZZ_CALLS_PER_INSTANCE ?= 100
+
+# Validate SMC fuzzer parameters
+
+# Instance count must not be zero
+ifeq ($(SMC_FUZZ_INSTANCE_COUNT),0)
+$(error SMC_FUZZ_INSTANCE_COUNT must not be zero!)
+endif
+
+# Calls per instance must not be zero
+ifeq ($(SMC_FUZZ_CALLS_PER_INSTANCE),0)
+$(error SMC_FUZZ_CALLS_PER_INSTANCE must not be zero!)
+endif
+
+# Make sure seed count and instance count match
+TEST_SEED_COUNT = $(shell python -c "print(len(\"$(SMC_FUZZ_SEEDS)\".split(\",\")))")
+ifneq ($(TEST_SEED_COUNT), $(SMC_FUZZ_INSTANCE_COUNT))
+$(error Number of seeds does not match SMC_FUZZ_INSTANCE_COUNT!)
+endif
+
+# Add definitions to TFTF_DEFINES so they can be used in the code
+$(eval $(call add_define,TFTF_DEFINES,SMC_FUZZ_SEEDS))
+$(eval $(call add_define,TFTF_DEFINES,SMC_FUZZ_INSTANCE_COUNT))
+$(eval $(call add_define,TFTF_DEFINES,SMC_FUZZ_CALLS_PER_INSTANCE))
+ifeq ($(MULTI_CPU_SMC_FUZZER),1)
+$(eval $(call add_define,TFTF_DEFINES,MULTI_CPU_SMC_FUZZER))
+endif
+
TESTS_SOURCES += \
$(addprefix smc_fuzz/src/, \
randsmcmod.c \
smcmalloc.c \
fifo3d.c \
+ runtestfunction_helpers.c \
+ sdei_fuzz_helper.c \
+ tsp_fuzz_helper.c \
+ nfifo.c \
)
diff --git a/tftf/tests/tests-spm.mk b/tftf/tests/tests-spm.mk
index e62e03d94..97b3a491d 100644
--- a/tftf/tests/tests-spm.mk
+++ b/tftf/tests/tests-spm.mk
@@ -1,19 +1,46 @@
#
-# Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+TFTF_INCLUDES += \
+ -Iinclude/runtime_services/host_realm_managment
+
TESTS_SOURCES += \
$(addprefix tftf/tests/runtime_services/secure_service/, \
+ ${ARCH}/ffa_arch_helpers.S \
ffa_helpers.c \
spm_common.c \
+ spm_test_helpers.c \
test_ffa_direct_messaging.c \
- test_ffa_features.c \
test_ffa_interrupts.c \
+ test_ffa_secure_interrupts.c \
test_ffa_memory_sharing.c \
- test_ffa_rxtx_map.c \
- test_ffa_version.c \
- test_spm_cpu_features.c \
+ test_ffa_setup_and_discovery.c \
+ test_ffa_notifications.c \
test_spm_smmu.c \
+ test_ffa_exceptions.c \
+ test_ffa_group0_interrupts.c \
+ )
+
+ifeq (${ARCH},aarch64)
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ test_spm_simd.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/host_realm_managment/, \
+ host_realm_rmi.c \
+ host_realm_helper.c \
)
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ test_ffa_smccc.c \
+ test_ffa_smccc_asm.S \
+ )
+
+TESTS_SOURCES += lib/extensions/fpu/fpu.c
+endif
diff --git a/tftf/tests/tests-spm.xml b/tftf/tests/tests-spm.xml
index 32efc161b..09e0fd772 100644
--- a/tftf/tests/tests-spm.xml
+++ b/tftf/tests/tests-spm.xml
@@ -1,33 +1,61 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ Copyright (c) 2018-2023, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
<testsuites>
+ <testsuite name="FF-A Setup and Discovery"
+ description="Test FF-A Setup and Discovery interfaces" >
+ <testcase name="Test FFA_FEATURES"
+ function="test_ffa_features" />
- <testsuite name="FF-A Version"
- description="Test FF-A Version ABI" >
-
- <testcase name="Same FFA version as SPM"
- function="test_ffa_version_equal" />
- <testcase name="Setting bit 31 in input version"
- function="test_ffa_version_bit31"/>
- <testcase name="Bigger FFA version than SPM"
- function="test_ffa_version_bigger" />
- <testcase name="Smaller FFA version than SPM"
- function="test_ffa_version_smaller" />
-
- </testsuite>
+ <testcase name="Same FFA version as SPM"
+ function="test_ffa_version_equal" />
+ <testcase name="Setting bit 31 in input version"
+ function="test_ffa_version_bit31"/>
+ <testcase name="Bigger FFA version than SPM"
+ function="test_ffa_version_bigger" />
+ <testcase name="Smaller FFA version than SPM"
+ function="test_ffa_version_smaller" />
- <testsuite name="FF-A RXTX Mapping"
- description="Test to FF-A RXTX mapping ABI" >
<testcase name="FF-A RXTX Map API success"
function="test_ffa_rxtx_map_success" />
<testcase name="FF-A RXTX Map API consecutive"
function="test_ffa_rxtx_map_fail" />
+ <testcase name="FF-A RXTX Unmap API success"
+ function="test_ffa_rxtx_unmap_success" />
+ <testcase name="FF-A RXTX Unmap API consecutive"
+ function="test_ffa_rxtx_unmap_fail" />
+ <testcase name="FF-A RXTX remap unmapped region success"
+ function="test_ffa_rxtx_map_unmapped_success" />
+ <testcase name="FF-A RXTX unmap SP rxtx buffer"
+ function="test_ffa_rxtx_unmap_fail_if_sp" />
+ <testcase name="Test FFA_SPM_ID_GET"
+ function="test_ffa_spm_id_get" />
+
+ <testcase name="Test FFA_PARTITION_INFO_GET"
+ function="test_ffa_partition_info" />
+ <testcase name="Test FFA_PARTITION_INFO_GET v1.0"
+ function="test_ffa_partition_info_v1_0" />
+ </testsuite>
+
+ <testsuite name="FF-A SMCCC compliance"
+ description="SMCCC compliance" >
+ <testcase name="FF-A callee preserves GP register set per SMCCC"
+ function="test_smccc_callee_preserved" />
+ <testcase name="FF-A callee preserves extended GP register set per SMCCC"
+ function="test_smccc_ext_callee_preserved" />
+ </testsuite>
+
+ <testsuite name="SP exceptions"
+ description="SP exceptions" >
+
+ <testcase name="Access from a SP to a Realm region"
+ function="rl_memory_cannot_be_accessed_in_s" />
+
</testsuite>
<testsuite name="FF-A Direct messaging"
@@ -44,6 +72,14 @@
</testsuite>
+ <testsuite name="FF-A Group0 interrupts"
+ description="Test FF-A Group0 secure interrupt delegation to EL3" >
+ <testcase name="FF-A Group0 secure world"
+ function="test_ffa_group0_interrupt_sp_running" />
+ <testcase name="FF-A Group0 normal world"
+ function="test_ffa_group0_interrupt_in_nwd" />
+ </testsuite>
+
<testsuite name="FF-A Power management"
description="Test FF-A power management" >
<testcase name="FF-A SP hotplug"
@@ -52,8 +88,24 @@
<testsuite name="FF-A Memory Sharing"
description="Test FF-A Memory Sharing ABIs" >
+ <testcase name="Hypervisor share + memory retrieve request"
+ function="test_hypervisor_share_retrieve" />
+ <testcase name="Hypervisor lend + memory retrieve request"
+ function="test_hypervisor_lend_retrieve" />
+ <testcase name="Hypervisor donate + memory retrieve request"
+ function="test_hypervisor_donate_retrieve" />
+ <testcase name="Hypervisor share + memory retrieve request (multiple receivers)"
+ function="test_hypervisor_share_retrieve_multiple_receivers" />
+ <testcase name="Hypervisor lend + memory retrieve request (multiple receivers)"
+ function="test_hypervisor_lend_retrieve_multiple_receivers" />
+ <testcase name="Hypervisor share + memory retrieve request (fragmented)"
+ function="test_hypervisor_share_retrieve_fragmented" />
+ <testcase name="Hypervisor lend + memory retrieve request (fragmented)"
+ function="test_hypervisor_lend_retrieve_fragmented" />
<testcase name="Lend Memory to Secure World"
function="test_mem_lend_sp" />
+ <testcase name="Lend memory, clear flag set"
+ function="test_mem_share_to_sp_clear_memory"/>
<testcase name="Share Memory with Secure World"
function="test_mem_share_sp" />
<testcase name="Donate Memory to Secure World"
@@ -64,29 +116,108 @@
function="test_req_mem_lend_sp_to_sp" />
<testcase name="Request Donate Memory SP-to-SP"
function="test_req_mem_donate_sp_to_sp" />
- </testsuite>
-
- <testsuite name="FF-A features"
- description="Test FFA_FEATURES ABI" >
- <testcase name="Test FFA_FEATURES"
- function="test_ffa_features" />
+ <testcase name="Request Share NS Memory (large PA) SP-to-SP"
+ function="test_req_ns_mem_share_sp_to_sp" />
+ <testcase name="Request Share Memory SP-to-VM"
+ function="test_req_mem_share_sp_to_vm" />
+ <testcase name="Request Lend Memory SP-to-VM"
+ function="test_req_mem_lend_sp_to_vm" />
+ <testcase name="Share forbidden memory with SP"
+ function="test_share_forbidden_ranges" />
+ <testcase name="Donate consecutively"
+ function="test_consecutive_donate" />
</testsuite>
<testsuite name="SIMD,SVE Registers context"
description="Validate context switch between NWd and SWd" >
<testcase name="Check that SIMD registers context is preserved"
function="test_simd_vectors_preserved" />
+ <testcase name="Check that SVE registers context is preserved"
+ function="test_sve_vectors_preserved" />
+ <testcase name="Check that SVE operations in NWd are unaffected by SWd"
+ function="test_sve_vectors_operations" />
</testsuite>
<testsuite name="FF-A Interrupt"
description="Test non-secure Interrupts" >
- <testcase name="Test NS interrupts"
- function="test_ffa_ns_interrupt" />
+<!--
+ <testcase name="Test SPx with NS Interrupt queued"
+ function="test_ffa_ns_interrupt_queued" />
+-->
+ <testcase name="Test SPx with NS Interrupt signaled and SPy with Managed Exit"
+ function="test_ffa_SPx_signaled_SPy_ME" />
+ <testcase name="Test Managed Exit in SP call chain"
+ function="test_ffa_ns_interrupt_managed_exit_chained" />
+ <testcase name="Test SPx with Managed Exit and SPy with NS Interrupt signaled"
+ function="test_ffa_SPx_ME_SPy_signaled" />
+ <testcase name="Test Managed Exit"
+ function="test_ffa_ns_interrupt_managed_exit" />
+ <testcase name="Test NS interrupt Signalable"
+ function="test_ffa_ns_interrupt_signaled" />
+ <testcase name="Test Secure interrupt handling while SP running"
+ function="test_ffa_sec_interrupt_sp_running" />
+ <testcase name="Test Secure interrupt handling while SP waiting"
+ function="test_ffa_sec_interrupt_sp_waiting" />
+ <testcase name="Test Secure interrupt handling while SP blocked"
+ function="test_ffa_sec_interrupt_sp_blocked" />
+ <testcase name="Test Secure interrupt handling while SP1 waiting SP2 running"
+ function="test_ffa_sec_interrupt_sp1_waiting_sp2_running" />
+ <testcase name="Test ESPI Secure interrupt handling"
+ function="test_ffa_espi_sec_interrupt" />
</testsuite>
<testsuite name="SMMUv3 tests"
description="Initiate stage2 translation for streams from upstream peripherals" >
<testcase name="Check DMA command by SMMUv3TestEngine completes"
function="test_smmu_spm" />
+ <testcase name="Check secure peripheral access to a realm region is aborted"
+ function="test_smmu_spm_invalid_access" />
</testsuite>
+
+ <testsuite name="FF-A Notifications"
+ description="Test Notifications functionality" >
+ <testcase name="Notifications interrupts ID retrieval with FFA_FEATURES"
+ function= "test_notifications_retrieve_int_ids" />
+ <testcase name="Notifications bitmap create and destroy"
+ function="test_ffa_notifications_bitmap_create_destroy" />
+ <testcase name="Notifications bitmap destroy not created"
+ function="test_ffa_notifications_destroy_not_created" />
+ <testcase name="Notifications bitmap create after create"
+ function="test_ffa_notifications_create_after_create" />
+ <testcase name="SP Notifications bind and unbind"
+ function="test_ffa_notifications_sp_bind_unbind" />
+ <testcase name="VM Notifications bind and unbind"
+ function="test_ffa_notifications_vm_bind_unbind" />
+ <testcase name="VM Notifications bind NS Sender"
+ function="test_ffa_notifications_vm_bind_vm" />
+ <testcase name="Notifications bind/unbind of bound Notifications"
+ function="test_ffa_notifications_already_bound" />
+ <testcase name="Notifications bind/unbind SPs spoofing receiver"
+ function="test_ffa_notifications_bind_unbind_spoofing" />
+ <testcase name="Notifications zeroed in bind and unbind"
+ function="test_ffa_notifications_bind_unbind_zeroed" />
+ <testcase name="Notifications VM signals SP"
+ function="test_ffa_notifications_vm_signals_sp" />
+ <testcase name="Notifications SP signals SP"
+ function="test_ffa_notifications_sp_signals_sp" />
+ <testcase name="Notifications SP signals VM"
+ function="test_ffa_notifications_sp_signals_vm" />
+ <testcase name="Notifications SP signals SP with immediate SRI"
+ function="test_ffa_notifications_sp_signals_sp_immediate_sri" />
+ <testcase name="Notifications SP signals SP with delayed SRI"
+ function="test_ffa_notifications_sp_signals_sp_delayed_sri" />
+ <testcase name="Notifications unbind while pending"
+ function="test_ffa_notifications_unbind_pending" />
+ <testcase name="Notifications MP SP signals UP SP per-vCPU"
+ function="test_ffa_notifications_mp_sp_signals_up_sp" />
+ <testcase name="Notifications info get no data"
+ function="test_ffa_notifications_info_get_none" />
+ <testcase name="Notifications VM signals SP per-vCPU"
+ function="test_ffa_notifications_vm_signals_sp_per_vcpu" />
+ <testcase name="Notifications SP signals SP per-vCPU"
+ function="test_ffa_notifications_sp_signals_sp_per_vcpu" />
+ <testcase name="Notifications SP signals VM per-vCPU"
+ function="test_ffa_notifications_sp_signals_vm_per_vcpu" />
+ </testsuite>
+
</testsuites>
diff --git a/tftf/tests/tests-standard.mk b/tftf/tests/tests-standard.mk
index c6c9029f3..46157ce1e 100644
--- a/tftf/tests/tests-standard.mk
+++ b/tftf/tests/tests-standard.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -16,13 +16,21 @@ TESTS_MAKEFILE := $(addprefix tftf/tests/, \
tests-sdei.mk \
tests-single-fault.mk \
tests-smc.mk \
- tests-spm.mk \
tests-template.mk \
tests-tftf-validation.mk \
tests-trng.mk \
+ tests-errata_abi.mk \
tests-tsp.mk \
tests-uncontainable.mk \
- tests-debugfs.mk \
+ tests-debugfs.mk \
+)
+
+ifeq (${ARCH},aarch64)
+TESTS_MAKEFILE += $(addprefix tftf/tests/, \
+ tests-spm.mk \
+ tests-realm-payload.mk \
+ tests-rmi-spm.mk \
)
+endif
include ${TESTS_MAKEFILE}
diff --git a/tftf/tests/tests-standard.xml b/tftf/tests/tests-standard.xml
index 8c66cdaf0..d2c2639df 100644
--- a/tftf/tests/tests-standard.xml
+++ b/tftf/tests/tests-standard.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ Copyright (c) 2018-2023, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
@@ -15,6 +15,7 @@
<!ENTITY tests-sdei SYSTEM "tests-sdei.xml">
<!ENTITY tests-rt-instr SYSTEM "tests-runtime-instrumentation.xml">
<!ENTITY tests-trng SYSTEM "tests-trng.xml">
+ <!ENTITY tests-errata_abi SYSTEM "tests-errata_abi.xml">
<!ENTITY tests-tsp SYSTEM "tests-tsp.xml">
<!ENTITY tests-el3-pstate SYSTEM "tests-el3-power-state.xml">
<!ENTITY tests-state-switch SYSTEM "tests-arm-state-switch.xml">
@@ -24,6 +25,8 @@
<!ENTITY tests-spm SYSTEM "tests-spm.xml">
<!ENTITY tests-pmu-leakage SYSTEM "tests-pmu-leakage.xml">
<!ENTITY tests-debugfs SYSTEM "tests-debugfs.xml">
+ <!ENTITY tests-rmi-spm SYSTEM "tests-rmi-spm.xml">
+ <!ENTITY tests-realm-payload SYSTEM "tests-realm-payload.xml">
]>
<testsuites>
@@ -34,6 +37,7 @@
&tests-sdei;
&tests-rt-instr;
&tests-trng;
+ &tests-errata_abi;
&tests-tsp;
&tests-el3-pstate;
&tests-state-switch;
@@ -43,5 +47,7 @@
&tests-spm;
&tests-pmu-leakage;
&tests-debugfs;
+ &tests-rmi-spm;
+ &tests-realm-payload;
</testsuites>
diff --git a/tftf/tests/tests-tftf-validation.xml b/tftf/tests/tests-tftf-validation.xml
index 932b10e8b..e1e48d96c 100644
--- a/tftf/tests/tests-tftf-validation.xml
+++ b/tftf/tests/tests-tftf-validation.xml
@@ -20,7 +20,6 @@
<testcase name="Verify the timer interrupt generation" function="test_timer_framework_interrupt" />
<testcase name="Target timer to a power down cpu" function="test_timer_target_power_down_cpu" />
<testcase name="Test scenario where multiple CPUs call same timeout" function="test_timer_target_multiple_same_interval" />
- <testcase name="Stress test the timer framework" function="stress_test_timer_framework" />
</testsuite>
</testsuites>
diff --git a/tftf/tests/tests-timer-stress.mk b/tftf/tests/tests-timer-stress.mk
new file mode 100644
index 000000000..50ee7234b
--- /dev/null
+++ b/tftf/tests/tests-timer-stress.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/framework_validation_tests/, \
+ test_timer_framework.c \
+ )
diff --git a/tftf/tests/tests-timer-stress.xml b/tftf/tests/tests-timer-stress.xml
new file mode 100644
index 000000000..e461e9070
--- /dev/null
+++ b/tftf/tests/tests-timer-stress.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2022, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="Stress tests" description="Validate all stress tests">
+ <testcase name="Stress test the timer framework" function="stress_test_timer_framework" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-trng.mk b/tftf/tests/tests-trng.mk
index d2842964e..abeb5b544 100644
--- a/tftf/tests/tests-trng.mk
+++ b/tftf/tests/tests-trng.mk
@@ -1,7 +1,10 @@
#
-# Copyright (c) 2021, Arm Limited. All rights reserved.
+# Copyright (c) 2021-2022, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
-TESTS_SOURCES += tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/standard_service/, \
+ /trng/api_tests/test_trng.c \
+ )
diff --git a/tftf/tests/tests-tsp.mk b/tftf/tests/tests-tsp.mk
index b1d8b15c7..35ef02af9 100644
--- a/tftf/tests/tests-tsp.mk
+++ b/tftf/tests/tests-tsp.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2022, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -11,4 +11,5 @@ TESTS_SOURCES += \
test_smc_tsp_std_fn_call.c \
test_tsp_fast_smc.c \
test_normal_int_switch.c \
+ test_pstate_after_exception.c \
)
diff --git a/tftf/tests/tests-tsp.xml b/tftf/tests/tests-tsp.xml
index 7e1018ece..55dfbea85 100644
--- a/tftf/tests/tests-tsp.xml
+++ b/tftf/tests/tests-tsp.xml
@@ -1,13 +1,12 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2018, Arm Limited. All rights reserved.
+ Copyright (c) 2018-2022, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
<testsuites>
-
<testsuite name="IRQ support in TSP" description="Test the normal IRQ preemption support in TSP.">
<testcase name="TSP preempt by IRQ and resume" function="tsp_int_and_resume" />
<testcase name="Fast SMC while TSP preempted" function="test_fast_smc_when_tsp_preempted" />
@@ -32,4 +31,8 @@
<testcase name="Stress test TSP functionality" function="test_tsp_fast_smc_operations" />
</testsuite>
+ <testsuite name="TSP PSTATE test" description="Test PSTATE bits are maintained during exception">
+ <testcase name="Test PSTATE bits maintained on exception" function="tsp_check_pstate_maintained_on_exception" />
+ </testsuite>
+
</testsuites>
diff --git a/tftf/tests/tests-uncontainable.mk b/tftf/tests/tests-uncontainable.mk
index 7a4730025..873b43976 100644
--- a/tftf/tests/tests-uncontainable.mk
+++ b/tftf/tests/tests-uncontainable.mk
@@ -1,10 +1,10 @@
#
-# Copyright (c) 2018, Arm Limited. All rights reserved.
+# Copyright (c) 2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
- inject_serror.S \
+ inject_ras_error.S \
test_uncontainable.c \
)
diff --git a/tftf/tests/tests-undef-injection.mk b/tftf/tests/tests-undef-injection.mk
new file mode 100644
index 000000000..e13df173f
--- /dev/null
+++ b/tftf/tests/tests-undef-injection.mk
@@ -0,0 +1,7 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += tftf/tests/misc_tests/test_undef_injection.c
diff --git a/tftf/tests/tests-undef-injection.xml b/tftf/tests/tests-undef-injection.xml
new file mode 100644
index 000000000..0d43cdf02
--- /dev/null
+++ b/tftf/tests/tests-undef-injection.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="UNDEF Injection" description="UNDEF injection from EL3 to lower EL">
+ <testcase name="UNDEF Injection to lower EL"
+ function="test_undef_injection" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-versal.mk b/tftf/tests/tests-versal.mk
new file mode 100644
index 000000000..6717ee53f
--- /dev/null
+++ b/tftf/tests/tests-versal.mk
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+TESTS_SOURCES += $(addprefix tftf/tests/plat/xilinx/common/, \
+ plat_pm.c \
+)
+
+
+include tftf/tests/tests-standard.mk
+TESTS_SOURCES := $(sort ${TESTS_SOURCES})
diff --git a/tftf/tests/tests-versal.xml b/tftf/tests/tests-versal.xml
new file mode 100644
index 000000000..6c8f51946
--- /dev/null
+++ b/tftf/tests/tests-versal.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<document>
+ <!-- External reference to standard tests files. -->
+ <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="tests-standard.xml" />
+ <testsuites>
+
+ <testsuite name="AMD-Xilinx tests" description="AMD-Xilinx common platform tests" >
+ <testcase name="Read PM API Version" function="test_pmapi_version" />
+ <testcase name="Get Platform Chip ID" function="test_get_chipid" />
+ </testsuite>
+
+ </testsuites>
+</document>
diff --git a/tftf/tests/xlat_lib_v2/xlat_lib_v2_tests.c b/tftf/tests/xlat_lib_v2/xlat_lib_v2_tests.c
index 519ff16b2..c4ffbf9ac 100644
--- a/tftf/tests/xlat_lib_v2/xlat_lib_v2_tests.c
+++ b/tftf/tests/xlat_lib_v2/xlat_lib_v2_tests.c
@@ -163,6 +163,9 @@ static int add_region(unsigned long long base_pa, uintptr_t base_va,
{
int ret;
+ if (size == 0U) {
+ return -EPERM;
+ }
VERBOSE("mmap_add_dynamic_region(0x%llx, 0x%lx, 0x%zx, 0x%x)\n",
base_pa, base_va, size, attr);