Merge "test(fvp): Test trusted key certificate corruption"
diff --git a/el3_payload/Makefile b/el3_payload/Makefile
index 188731c..3ea39fb 100644
--- a/el3_payload/Makefile
+++ b/el3_payload/Makefile
@@ -4,6 +4,8 @@
# SPDX-License-Identifier: BSD-3-Clause
#
+include ../make_helpers/build_macros.mk
+
# Default number of threads per CPU on FVP
FVP_MAX_PE_PER_CPU := 1
@@ -31,6 +33,8 @@
ASFLAGS := -nostdinc -ffreestanding -Wa,--fatal-warnings -Werror
ASFLAGS += -Iplat/${PLAT}/ -I.
+LDFLAGS += $(call ld_option,--no-warn-rwx-segments)
+
PLAT_BUILD_DIR := build/${PLAT}
SOURCES := entrypoint.S spin.S uart.S plat/${PLAT}/platform.S
OBJS := $(patsubst %,$(PLAT_BUILD_DIR)/%,$(notdir $(SOURCES:.S=.o)))
diff --git a/include/common/test_helpers.h b/include/common/test_helpers.h
index 6b44d0a..8cddc72 100644
--- a/include/common/test_helpers.h
+++ b/include/common/test_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -224,6 +224,14 @@
} \
} while (false)
+#define SKIP_TEST_IF_MPAM_NOT_SUPPORTED() \
+ do { \
+ if (!is_feat_mpam_supported()) { \
+ tftf_testcase_printf("ARMv8.4-mpam not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
#ifdef __aarch64__
#define SKIP_TEST_IF_PA_SIZE_LESS_THAN(n) \
do { \
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index 0c64e40..88c873c 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -418,6 +418,9 @@
#define ID_AA64PFR1_RAS_FRAC_WIDTH U(4)
#define ID_AA64PFR1_RASV1P1_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_MPAM_FRAC_SHIFT U(16)
+#define ID_AA64PFR1_MPAM_FRAC_MASK ULL(0xf)
+
/* ID_PFR1_EL1 definitions */
#define ID_PFR1_VIRTEXT_SHIFT U(12)
#define ID_PFR1_VIRTEXT_MASK U(0xf)
diff --git a/include/lib/aarch64/arch_features.h b/include/lib/aarch64/arch_features.h
index 85f8952..b6d0ce7 100644
--- a/include/lib/aarch64/arch_features.h
+++ b/include/lib/aarch64/arch_features.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -212,6 +212,19 @@
== ID_AA64PFR1_EL1_RNG_TRAP_SUPPORTED);
}
+static inline bool is_feat_mpam_supported(void)
+{
+ /*
+ * If the MPAM version retrieved from the Processor Feature registers
+ * is a non-zero value, then MPAM is supported.
+ */
+
+ return (((((read_id_aa64pfr0_el1() >>
+ ID_AA64PFR0_MPAM_SHIFT) & ID_AA64PFR0_MPAM_MASK) << 4) |
+ ((read_id_aa64pfr1_el1() >>
+ ID_AA64PFR1_MPAM_FRAC_SHIFT) & ID_AA64PFR1_MPAM_FRAC_MASK)) != 0U);
+}
+
static inline unsigned int spe_get_version(void)
{
return (unsigned int)((read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT) &
diff --git a/include/lib/transfer_list.h b/include/lib/transfer_list.h
index 9ee1f55..8bf16cf 100644
--- a/include/lib/transfer_list.h
+++ b/include/lib/transfer_list.h
@@ -12,7 +12,7 @@
#include <lib/utils_def.h>
-#define TRANSFER_LIST_SIGNATURE U(0x006ed0ff)
+#define TRANSFER_LIST_SIGNATURE U(0x4a0fb10b)
#define TRANSFER_LIST_VERSION U(0x0001)
// Init value of maximum alignment required by any TE data in the TL
diff --git a/include/runtime_services/cactus_test_cmds.h b/include/runtime_services/cactus_test_cmds.h
index 5f3d0d2..af5d066 100644
--- a/include/runtime_services/cactus_test_cmds.h
+++ b/include/runtime_services/cactus_test_cmds.h
@@ -394,10 +394,12 @@
#define CACTUS_DMA_SMMUv3_CMD (0x534d4d55)
static inline struct ffa_value cactus_send_dma_cmd(
- ffa_id_t source, ffa_id_t dest)
+ ffa_id_t source, ffa_id_t dest, uint32_t operation,
+ uintptr_t base, size_t range, uint32_t attributes)
{
- return cactus_send_cmd(source, dest, CACTUS_DMA_SMMUv3_CMD, 0, 0, 0,
- 0);
+ return cactus_send_cmd(source, dest, CACTUS_DMA_SMMUv3_CMD,
+ (uint64_t)operation, (uint64_t)base,
+ (uint64_t)range, attributes);
}
/*
diff --git a/include/runtime_services/ffa_helpers.h b/include/runtime_services/ffa_helpers.h
index 149969e..0f19827 100644
--- a/include/runtime_services/ffa_helpers.h
+++ b/include/runtime_services/ffa_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -400,7 +400,13 @@
FFA_MEMORY_INNER_SHAREABLE,
};
-typedef uint8_t ffa_memory_access_permissions_t;
+typedef struct {
+ uint8_t data_access : 2;
+ uint8_t instruction_access : 2;
+} ffa_memory_access_permissions_t;
+
+_Static_assert(sizeof(ffa_memory_access_permissions_t) == sizeof(uint8_t),
+ "ffa_memory_access_permissions_t must be 1 byte wide");
/**
* FF-A v1.1 REL0 Table 10.18 memory region attributes descriptor NS Bit 6.
@@ -417,74 +423,20 @@
* This corresponds to table 10.18 of the FF-A v1.1 EAC0 specification, "Memory
* region attributes descriptor".
*/
-typedef uint16_t ffa_memory_attributes_t;
+typedef struct {
+ uint16_t shareability : 2;
+ uint16_t cacheability : 2;
+ uint16_t type : 2;
+ uint16_t security : 1;
+} ffa_memory_attributes_t;
-#define FFA_DATA_ACCESS_OFFSET (0x0U)
-#define FFA_DATA_ACCESS_MASK ((0x3U) << FFA_DATA_ACCESS_OFFSET)
+_Static_assert(sizeof(ffa_memory_attributes_t) == sizeof(uint16_t),
+ "ffa_memory_attributes_t must be 2 bytes wide");
-#define FFA_INSTRUCTION_ACCESS_OFFSET (0x2U)
-#define FFA_INSTRUCTION_ACCESS_MASK ((0x3U) << FFA_INSTRUCTION_ACCESS_OFFSET)
-
-#define FFA_MEMORY_TYPE_OFFSET (0x4U)
-#define FFA_MEMORY_TYPE_MASK ((0x3U) << FFA_MEMORY_TYPE_OFFSET)
-
-#define FFA_MEMORY_SECURITY_OFFSET (0x6U)
-#define FFA_MEMORY_SECURITY_MASK ((0x1U) << FFA_MEMORY_SECURITY_OFFSET)
-
-#define FFA_MEMORY_CACHEABILITY_OFFSET (0x2U)
-#define FFA_MEMORY_CACHEABILITY_MASK ((0x3U) << FFA_MEMORY_CACHEABILITY_OFFSET)
-
-#define FFA_MEMORY_SHAREABILITY_OFFSET (0x0U)
-#define FFA_MEMORY_SHAREABILITY_MASK ((0x3U) << FFA_MEMORY_SHAREABILITY_OFFSET)
-
-#define ATTR_FUNCTION_SET(name, container_type, offset, mask) \
- static inline void ffa_set_##name##_attr(container_type *attr, \
- const enum ffa_##name perm) \
- { \
- *attr = (*attr & ~(mask)) | ((perm << offset) & mask); \
- }
-
-#define ATTR_FUNCTION_GET(name, container_type, offset, mask) \
- static inline enum ffa_##name ffa_get_##name##_attr( \
- container_type attr) \
- { \
- return (enum ffa_##name)((attr & mask) >> offset); \
- }
-
-ATTR_FUNCTION_SET(data_access, ffa_memory_access_permissions_t,
- FFA_DATA_ACCESS_OFFSET, FFA_DATA_ACCESS_MASK)
-ATTR_FUNCTION_GET(data_access, ffa_memory_access_permissions_t,
- FFA_DATA_ACCESS_OFFSET, FFA_DATA_ACCESS_MASK)
-
-ATTR_FUNCTION_SET(instruction_access, ffa_memory_access_permissions_t,
- FFA_INSTRUCTION_ACCESS_OFFSET, FFA_INSTRUCTION_ACCESS_MASK)
-ATTR_FUNCTION_GET(instruction_access, ffa_memory_access_permissions_t,
- FFA_INSTRUCTION_ACCESS_OFFSET, FFA_INSTRUCTION_ACCESS_MASK)
-
-ATTR_FUNCTION_SET(memory_type, ffa_memory_attributes_t, FFA_MEMORY_TYPE_OFFSET,
- FFA_MEMORY_TYPE_MASK)
-ATTR_FUNCTION_GET(memory_type, ffa_memory_attributes_t, FFA_MEMORY_TYPE_OFFSET,
- FFA_MEMORY_TYPE_MASK)
-
-ATTR_FUNCTION_SET(memory_security, ffa_memory_attributes_t,
- FFA_MEMORY_SECURITY_OFFSET, FFA_MEMORY_SECURITY_MASK)
-ATTR_FUNCTION_GET(memory_security, ffa_memory_attributes_t,
- FFA_MEMORY_SECURITY_OFFSET, FFA_MEMORY_SECURITY_MASK)
-
-ATTR_FUNCTION_SET(memory_cacheability, ffa_memory_attributes_t,
- FFA_MEMORY_CACHEABILITY_OFFSET, FFA_MEMORY_CACHEABILITY_MASK)
-ATTR_FUNCTION_GET(memory_cacheability, ffa_memory_attributes_t,
- FFA_MEMORY_CACHEABILITY_OFFSET, FFA_MEMORY_CACHEABILITY_MASK)
-
-ATTR_FUNCTION_SET(memory_shareability, ffa_memory_attributes_t,
- FFA_MEMORY_SHAREABILITY_OFFSET, FFA_MEMORY_SHAREABILITY_MASK)
-ATTR_FUNCTION_GET(memory_shareability, ffa_memory_attributes_t,
- FFA_MEMORY_SHAREABILITY_OFFSET, FFA_MEMORY_SHAREABILITY_MASK)
-
-#define FFA_MEMORY_HANDLE_ALLOCATOR_MASK \
- ((ffa_memory_handle_t)(UINT64_C(1) << 63))
-#define FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR \
- ((ffa_memory_handle_t)(UINT64_C(1) << 63))
+#define FFA_MEMORY_HANDLE_ALLOCATOR_MASK UINT64_C(1)
+#define FFA_MEMORY_HANDLE_ALLOCATOR_SHIFT 63U
+#define FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR UINT64_C(1)
+#define FFA_MEMORY_HANDLE_ALLOCATOR_SPMC UINT64_C(0)
#define FFA_MEMORY_HANDLE_INVALID (~UINT64_C(0))
/**
@@ -573,7 +525,11 @@
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE ((0x3U) << 3)
/** The maximum number of recipients a memory region may be sent to. */
-#define MAX_MEM_SHARE_RECIPIENTS 1U
+#define MAX_MEM_SHARE_RECIPIENTS 2U
+
+struct ffa_memory_access_impdef {
+ uint64_t val[2];
+};
/**
* This corresponds to table "Endpoint memory access descriptor" of the FFA 1.0
@@ -586,6 +542,8 @@
* an `ffa_composite_memory_region` struct.
*/
uint32_t composite_memory_region_offset;
+ /* Space for implementation defined information */
+ struct ffa_memory_access_impdef impdef;
uint64_t reserved_0;
};
@@ -656,6 +614,53 @@
return ffa_assemble_handle(r.arg2, r.arg3);
}
+static inline ffa_memory_handle_t ffa_frag_handle(struct ffa_value r)
+{
+ return ffa_assemble_handle(r.arg1, r.arg2);
+}
+
+static inline ffa_id_t ffa_frag_sender(struct ffa_value args)
+{
+ return (args.arg4 >> 16) & 0xffff;
+}
+
+/**
+ * To maintain forward compatibility we can't make assumptions about the size
+ * of the endpoint memory access descriptor so provide a helper function
+ * to get a receiver from the receiver array using the memory access descriptor
+ * size field from the memory region descriptor struct.
+ * Returns NULL if we cannot return the receiver.
+ */
+static inline struct ffa_memory_access *ffa_memory_region_get_receiver(
+ struct ffa_memory_region *memory_region, uint32_t receiver_index)
+{
+ uint32_t memory_access_desc_size =
+ memory_region->memory_access_desc_size;
+
+ if (receiver_index >= memory_region->receiver_count) {
+ return NULL;
+ }
+
+ /*
+ * Memory access descriptor size cannot be greater than the size of
+ * the memory access descriptor defined by the current FF-A version.
+ */
+ if (memory_access_desc_size > sizeof(struct ffa_memory_access)) {
+ return NULL;
+ }
+
+ /* Check we cannot use receivers offset to cause overflow. */
+ if (memory_region->receivers_offset !=
+ sizeof(struct ffa_memory_region)) {
+ return NULL;
+ }
+
+ return (struct ffa_memory_access *)((uint8_t *)memory_region +
+ memory_region->receivers_offset +
+ (receiver_index *
+ memory_access_desc_size));
+}
+
/**
* Gets the `ffa_composite_memory_region` for the given receiver from an
* `ffa_memory_region`, or NULL if it is not valid.
@@ -689,23 +694,31 @@
uint32_t ffa_memory_retrieve_request_init(
struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
- ffa_id_t sender, ffa_id_t receiver, uint32_t tag,
- ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
- enum ffa_instruction_access instruction_access,
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count, uint32_t tag, ffa_memory_region_flags_t flags,
enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
enum ffa_memory_shareability shareability);
+void ffa_hypervisor_retrieve_request_init(struct ffa_memory_region *region,
+ ffa_memory_handle_t handle);
+
uint32_t ffa_memory_region_init(
struct ffa_memory_region *memory_region, size_t memory_region_max_size,
- ffa_id_t sender, ffa_id_t receiver,
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count,
const struct ffa_memory_region_constituent constituents[],
uint32_t constituent_count, uint32_t tag,
- ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
- enum ffa_instruction_access instruction_access,
- enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
+ ffa_memory_region_flags_t flags, enum ffa_memory_type type,
+ enum ffa_memory_cacheability cacheability,
enum ffa_memory_shareability shareability, uint32_t *total_length,
uint32_t *fragment_length);
+uint32_t ffa_memory_fragment_init(
+ struct ffa_memory_region_constituent *fragment,
+ size_t fragment_max_size,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t *fragment_length);
+
static inline ffa_id_t ffa_dir_msg_dest(struct ffa_value val) {
return (ffa_id_t)val.arg1 & U(0xFFFF);
}
@@ -757,6 +770,10 @@
uint32_t fragment_length);
struct ffa_value ffa_mem_relinquish(void);
struct ffa_value ffa_mem_reclaim(uint64_t handle, uint32_t flags);
+struct ffa_value ffa_mem_frag_rx(ffa_memory_handle_t handle,
+ uint32_t fragment_length);
+struct ffa_value ffa_mem_frag_tx(ffa_memory_handle_t handle,
+ uint32_t fragment_length);
struct ffa_value ffa_notification_bitmap_create(ffa_id_t vm_id,
ffa_vcpu_count_t vcpu_count);
struct ffa_value ffa_notification_bitmap_destroy(ffa_id_t vm_id);
@@ -776,6 +793,13 @@
struct ffa_value ffa_partition_info_get_regs(const struct ffa_uuid uuid,
const uint16_t start_index,
const uint16_t tag);
+
+struct ffa_memory_access ffa_memory_access_init(
+ ffa_id_t receiver_id, enum ffa_data_access data_access,
+ enum ffa_instruction_access instruction_access,
+ ffa_memory_receiver_flags_t flags,
+ struct ffa_memory_access_impdef *impdef);
+
#endif /* __ASSEMBLY__ */
#endif /* FFA_HELPERS_H */
diff --git a/include/runtime_services/ffa_svc.h b/include/runtime_services/ffa_svc.h
index 3abd21f..226bbd1 100644
--- a/include/runtime_services/ffa_svc.h
+++ b/include/runtime_services/ffa_svc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -86,6 +86,8 @@
#define FFA_FNUM_MEM_RETRIEVE_RESP U(0x75)
#define FFA_FNUM_MEM_RELINQUISH U(0x76)
#define FFA_FNUM_MEM_RECLAIM U(0x77)
+#define FFA_FNUM_MEM_FRAG_RX U(0x7A)
+#define FFA_FNUM_MEM_FRAG_TX U(0x7B)
#define FFA_FNUM_NORMAL_WORLD_RESUME U(0x7C)
/* FF-A v1.1 */
@@ -134,6 +136,8 @@
#define FFA_MEM_RETRIEVE_RESP FFA_FID(SMC_32, FFA_FNUM_MEM_RETRIEVE_RESP)
#define FFA_MEM_RELINQUISH FFA_FID(SMC_32, FFA_FNUM_MEM_RELINQUISH)
#define FFA_MEM_RECLAIM FFA_FID(SMC_32, FFA_FNUM_MEM_RECLAIM)
+#define FFA_MEM_FRAG_RX FFA_FID(SMC_32, FFA_FNUM_MEM_FRAG_RX)
+#define FFA_MEM_FRAG_TX FFA_FID(SMC_32, FFA_FNUM_MEM_FRAG_TX)
#define FFA_NOTIFICATION_BITMAP_CREATE \
FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_BITMAP_CREATE)
#define FFA_NOTIFICATION_BITMAP_DESTROY \
diff --git a/include/runtime_services/host_realm_managment/host_shared_data.h b/include/runtime_services/host_realm_managment/host_shared_data.h
index 57af48d..8549512 100644
--- a/include/runtime_services/host_realm_managment/host_shared_data.h
+++ b/include/runtime_services/host_realm_managment/host_shared_data.h
@@ -45,6 +45,7 @@
REALM_SLEEP_CMD = 1U,
REALM_LOOP_CMD,
REALM_MULTIPLE_REC_PSCI_DENIED_CMD,
+ REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
REALM_GET_RSI_VERSION,
REALM_PMU_CYCLE,
REALM_PMU_EVENT,
diff --git a/include/runtime_services/spm_common.h b/include/runtime_services/spm_common.h
index 0c8ade1..ad2ba08 100644
--- a/include/runtime_services/spm_common.h
+++ b/include/runtime_services/spm_common.h
@@ -109,8 +109,11 @@
*/
bool memory_retrieve(struct mailbox_buffers *mb,
struct ffa_memory_region **retrieved, uint64_t handle,
- ffa_id_t sender, ffa_id_t receiver,
- ffa_memory_region_flags_t flags, uint32_t mem_func);
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count, ffa_memory_region_flags_t flags);
+
+bool hypervisor_retrieve_request(struct mailbox_buffers *mb, uint64_t handle,
+ void *out, uint32_t out_size);
/**
* Helper to conduct a memory relinquish. The caller is usually the receiver,
@@ -120,23 +123,31 @@
ffa_id_t id);
ffa_memory_handle_t memory_send(
- struct ffa_memory_region *memory_region, uint32_t mem_func,
- uint32_t fragment_length, uint32_t total_length, struct ffa_value *ret);
+ void *send_buffer, uint32_t mem_func,
+ const struct ffa_memory_region_constituent *constituents,
+ uint32_t constituent_count, uint32_t remaining_constituent_count,
+ uint32_t fragment_length, uint32_t total_length,
+ struct ffa_value *ret);
ffa_memory_handle_t memory_init_and_send(
- struct ffa_memory_region *memory_region, size_t memory_region_max_size,
- ffa_id_t sender, ffa_id_t receiver,
- const struct ffa_memory_region_constituent* constituents,
+ void *send_buffer, size_t memory_region_max_size, ffa_id_t sender,
+ struct ffa_memory_access receivers[], uint32_t receiver_count,
+ const struct ffa_memory_region_constituent *constituents,
uint32_t constituents_count, uint32_t mem_func, struct ffa_value *ret);
bool ffa_partition_info_helper(struct mailbox_buffers *mb,
- const struct ffa_uuid uuid,
- const struct ffa_partition_info *expected,
- const uint16_t expected_size);
+ const struct ffa_uuid uuid,
+ const struct ffa_partition_info *expected,
+ const uint16_t expected_size);
bool enable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest);
bool disable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest);
bool ffa_partition_info_regs_helper(const struct ffa_uuid uuid,
const struct ffa_partition_info *expected,
const uint16_t expected_size);
+
+struct ffa_memory_access ffa_memory_access_init_permissions_from_mem_func(
+ ffa_id_t receiver_id,
+ uint32_t mem_func);
+
#endif /* SPM_COMMON_H */
diff --git a/plat/xilinx/versal/tests_to_skip.txt b/plat/xilinx/versal/tests_to_skip.txt
index fddf331..b430058 100644
--- a/plat/xilinx/versal/tests_to_skip.txt
+++ b/plat/xilinx/versal/tests_to_skip.txt
@@ -39,8 +39,14 @@
PSCI CPU ON OFF Stress Tests/Repeated hotplug of all cores to stress test CPU_ON and CPU_OFF
PSCI CPU ON OFF Stress Tests/Random hotplug cores in a large iteration to stress boot path code
- #TESTS: TSP
- IRQ support in TSP/Resume preempted STD SMC after PSCI CPU OFF/ON cycle
- IRQ support in TSP/Resume preempted STD SMC after PSCI SYSTEM SUSPEND
- IRQ support in TSP/Resume preempted STD SMC
- TSP PSTATE test
+#TESTS: TSP
+IRQ support in TSP/Resume preempted STD SMC after PSCI CPU OFF/ON cycle
+IRQ support in TSP/Resume preempted STD SMC after PSCI SYSTEM SUSPEND
+IRQ support in TSP/Resume preempted STD SMC
+TSP PSTATE test
+
+#TESTS: runtime-instrumentation
+Runtime Instrumentation Validation
+
+#TESTS: debugfs
+DebugFS
diff --git a/plat/xilinx/versal_net/tests_to_skip.txt b/plat/xilinx/versal_net/tests_to_skip.txt
index a45426f..c07c748 100644
--- a/plat/xilinx/versal_net/tests_to_skip.txt
+++ b/plat/xilinx/versal_net/tests_to_skip.txt
@@ -67,3 +67,9 @@
IRQ support in TSP/Resume preempted STD SMC from other CPUs
IRQ support in TSP/Resume preempted STD SMC after PSCI CPU OFF/ON cycle
IRQ support in TSP/Resume preempted STD SMC after PSCI SYSTEM SUSPEND
+
+#TESTS: runtime-instrumentation
+Runtime Instrumentation Validation
+
+#TESTS: debugfs
+DebugFS
diff --git a/realm/include/realm_tests.h b/realm/include/realm_tests.h
index 5caea8c..3016a4d 100644
--- a/realm/include/realm_tests.h
+++ b/realm/include/realm_tests.h
@@ -23,6 +23,7 @@
bool test_realm_sve_cmp_regs(void);
bool test_realm_sve_undef_abort(void);
bool test_realm_multiple_rec_psci_denied_cmd(void);
+bool test_realm_multiple_rec_multiple_cpu_cmd(void);
bool test_realm_sme_read_id_registers(void);
bool test_realm_sme_undef_abort(void);
diff --git a/realm/realm_multiple_rec.c b/realm/realm_multiple_rec.c
index abd166b..c584cd4 100644
--- a/realm/realm_multiple_rec.c
+++ b/realm/realm_multiple_rec.c
@@ -22,6 +22,7 @@
#define CXT_ID_MAGIC 0x100
static uint64_t is_secondary_cpu_booted;
+static spinlock_t lock;
static void rec1_handler(u_register_t cxt_id)
{
@@ -31,7 +32,9 @@
realm_printf("Wrong cxt_id\n");
rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
}
+ spin_lock(&lock);
is_secondary_cpu_booted++;
+ spin_unlock(&lock);
realm_cpu_off();
}
@@ -67,3 +70,48 @@
}
return true;
}
+
+bool test_realm_multiple_rec_multiple_cpu_cmd(void)
+{
+ unsigned int i = 1U, rec_count;
+ u_register_t ret;
+
+ realm_printf("Realm: running on CPU = 0x%lx\n", read_mpidr_el1() & MPID_MASK);
+ rec_count = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+
+ /* Check CPU_ON is supported */
+ ret = realm_psci_features(SMC_PSCI_CPU_ON);
+ if (ret != PSCI_E_SUCCESS) {
+ realm_printf("SMC_PSCI_CPU_ON not supported\n");
+ return false;
+ }
+
+ for (unsigned int j = 1U; j < rec_count; j++) {
+ ret = realm_cpu_on(j, (uintptr_t)rec1_handler, CXT_ID_MAGIC + j);
+ if (ret != PSCI_E_SUCCESS) {
+ realm_printf("SMC_PSCI_CPU_ON failed %d.\n", j);
+ return false;
+ }
+ }
+
+ /* Exit to host to allow host to run all CPUs */
+ rsi_exit_to_host(HOST_CALL_EXIT_SUCCESS_CMD);
+ /* wait for all CPUs to come up */
+ while (is_secondary_cpu_booted != rec_count - 1U) {
+ waitms(200);
+ }
+
+ /* wait for all CPUs to turn off */
+ while (i < rec_count) {
+ ret = realm_psci_affinity_info(i, MPIDR_AFFLVL0);
+ if (ret != PSCI_STATE_OFF) {
+ /* wait and query again */
+ realm_printf(" CPU %d is not off\n", i);
+ waitms(200);
+ continue;
+ }
+ i++;
+ }
+ realm_printf("All CPU are off\n");
+ return true;
+}
diff --git a/realm/realm_payload_main.c b/realm/realm_payload_main.c
index 5c488ee..ddaa3cb 100644
--- a/realm/realm_payload_main.c
+++ b/realm/realm_payload_main.c
@@ -156,6 +156,9 @@
break;
case REALM_MULTIPLE_REC_PSCI_DENIED_CMD:
test_succeed = test_realm_multiple_rec_psci_denied_cmd();
+break;
+case REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD:
+test_succeed = test_realm_multiple_rec_multiple_cpu_cmd();
break;
case REALM_PAUTH_SET_CMD:
test_succeed = test_realm_pauth_set_cmd();
diff --git a/realm/realm_psci.c b/realm/realm_psci.c
index 2a5b951..a4a287b 100644
--- a/realm/realm_psci.c
+++ b/realm/realm_psci.c
@@ -74,15 +74,16 @@
void realm_secondary_entrypoint(u_register_t cxt_id)
{
- u_register_t my_mpidr;
+ u_register_t my_mpidr, id;
secondary_ep_t ep;
my_mpidr = read_mpidr_el1() & MPID_MASK;
ep = entrypoint[my_mpidr];
+ id = context_id[my_mpidr];
if (ep != NULL) {
entrypoint[my_mpidr] = NULL;
context_id[my_mpidr] = 0;
- (ep)(context_id[my_mpidr]);
+ (ep)(id);
} else {
/*
* Host can execute Rec directly without CPU_ON
diff --git a/spm/cactus/cactus.mk b/spm/cactus/cactus.mk
index be8997e..4e86e3d 100644
--- a/spm/cactus/cactus.mk
+++ b/spm/cactus/cactus.mk
@@ -49,7 +49,7 @@
) \
$(addprefix spm/cactus/cactus_tests/, \
cactus_message_loop.c \
- cactus_test_cpu_features.c \
+ cactus_test_simd.c \
cactus_test_direct_messaging.c \
cactus_test_interrupts.c \
cactus_test_memory_sharing.c \
diff --git a/spm/cactus/cactus_main.c b/spm/cactus/cactus_main.c
index 8ce132c..19a89f5 100644
--- a/spm/cactus/cactus_main.c
+++ b/spm/cactus/cactus_main.c
@@ -215,14 +215,14 @@
init_xlat_tables();
}
-static void register_secondary_entrypoint(void)
+static struct ffa_value register_secondary_entrypoint(void)
{
struct ffa_value args;
args.fid = FFA_SECONDARY_EP_REGISTER_SMC64;
args.arg1 = (u_register_t)&secondary_cold_entry;
- ffa_service_call(&args);
+ return ffa_service_call(&args);
}
void __dead2 cactus_main(bool primary_cold_boot,
@@ -315,7 +315,16 @@
cactus_print_memory_layout(ffa_id);
- register_secondary_entrypoint();
+ ret = register_secondary_entrypoint();
+
+ /* FFA_SECONDARY_EP_REGISTER interface is not supported for UP SP. */
+ if (ffa_id == (SPM_VM_ID_FIRST + 2)) {
+ expect(ffa_func_id(ret), FFA_ERROR);
+ expect(ffa_error_code(ret), FFA_ERROR_NOT_SUPPORTED);
+ } else {
+ expect(ffa_func_id(ret), FFA_SUCCESS_SMC32);
+ }
+
discover_managed_exit_interrupt_id();
register_maintenance_interrupt_handlers();
diff --git a/spm/cactus/cactus_tests/SMMUv3TestEngine.h b/spm/cactus/cactus_tests/SMMUv3TestEngine.h
new file mode 100644
index 0000000..b168d50
--- /dev/null
+++ b/spm/cactus/cactus_tests/SMMUv3TestEngine.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2015-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* -*- C -*-
+ *
+ * Copyright 2015 ARM Limited. All rights reserved.
+ */
+
+#ifndef ARM_INCLUDE_SMMUv3TestEngine_h
+#define ARM_INCLUDE_SMMUv3TestEngine_h
+
+#include <inttypes.h>
+
+///
+/// Notes on interfacing to PCIe
+/// ----------------------------
+///
+/// MSIAddress and MSIData are held in the MSI Table that is found by a BAR.
+///
+/// So if operating under PCIe then MSIAddress should be '1' and MSIData is
+/// interpreted as the vector to use (0..2048). If MSIAddress is not '0' or '1'
+/// then the frame is misconfigured.
+///
+/// StreamID is not run-time assignable as it is an attribute of the topology of
+/// the system.
+///
+/// In PCIe, then we need multiple instances of the engine and it shall occupy
+/// one Function.
+///
+/// Each BAR is 64 bits so the three BARs are:
+/// * BAR0 is going to point to a set of register frames, at least 128 KiB
+/// * BAR1/2 are MSI-X vector/pending bit array (PBA).
+///
+
+
+///
+/// The engine consists of a series of contiguous pairs of 64 KiB pages, each
+/// page consists of a series of frames. The frames in the first page (User
+/// Page) are expected to be able to be exposed to a low privileged piece of SW,
+/// whilst the second page (Privileged Page) is expected to be controlled by a
+/// higher level of SW.
+///
+/// Examples:
+/// 1) User Page controlled by EL1
+/// Privileged Page controlled by EL2
+/// 2) User Page controlled by EL0
+/// Privileged Page controlled by EL1
+///
+/// The engine can have an unlimited number of pairs.
+///
+/// Each pair of pages are full of register frames. The frames are the same
+/// size in both and frame N in the User page corresponds to frame N in the
+/// Privileged page.
+///
+/// The work load is setup by filling out all the non-cmd fields and then
+/// writing to cmd the command code. If Device-nGnR(n)E is used then no
+/// explicit barrier instruction is required.
+///
+/// When the work has finished then the engine sets cmd to ENGINE_HALTED or
+/// ENGINE_ERROR depending on if the engine encountered an error.
+///
+/// If the command was run then an MSI will be generated if msiaddress != 0,
+/// independent of if there was an error or not. If the MSI abort then
+/// uctrl.MSI_ABORTED is set.
+///
+/// If the frame/command was invalid for some reason then no MSI will be
+/// generated under the assumption that it can't trust the msiaddress field and
+/// ENGINE_FRAME_MISCONFIGURED is read out of cmd. Thus the user should write
+/// the command and then immediately read to see if it is in the
+/// ENGINE_FRAME_MISCONFIGURED state. It is guaranteed that a read of cmd
+/// after writing cmd will immediately return ENGINE_FRAME_MISCONFIGURED if the
+/// command was invalid.
+///
+/// If the engine is not in the ENGINE_HALTED, ENGINE_ERROR or
+/// ENGINE_FRAME_MISCONFIGURED state then any writes are ignored.
+///
+/// As this is a model-only device then the error diagnostics are crude as it is
+/// expected that a verbose error trace stream will come from the model!
+///
+/// Most of the work-loads can be seeded to do work in a random order with
+/// random transaction sizes. The exact specification of the order and
+/// transaction size are TBD. It is intended that the algorithm used is
+/// specified so that you can work out the order that it should be done in.
+///
+/// The device can issue multiple outstanding transactions for each work-load.
+///
+/// The device will accept any size access for all fields except for cmd.
+///
+/// If a single burst access crosses the boundary of a user_frame the result is
+/// UNPREDICTABLE. From a programmer's perspective, then you can use any way of
+/// writing to within the same frame. However, you should only write to cmd_
+/// separately with a single 32 bit access.
+///
+/// Whilst running the whole frame is write-ignored and the unspecified values
+/// of udata and pdata are UNKNOWN.
+///
+/// The begin, end_incl, stride and seed are interpreted as follows:
+///
+/// * if [begin & ~7ull, end_incl | 7ull] == [0, ~0ull], ENGINE_FRAME_MISCONFIGURED
+/// * such a huge range is not supported for any stride!
+/// * stride == 0, ENGINE_FRAME_MISCONFIGURED
+/// * stride == 1, then the range operated on is [begin, end_incl]
+/// * stride is a multiple of 8
+/// * single 64 bit transfers are performed
+/// * the addresses used are:
+/// (begin & ~7ull) + n * stride for n = 0..N
+/// where the last byte accessed is <= (end_incl | 7)
+/// * for any other value of stride, ENGINE_FRAME_MISCONFIGURED
+/// * if stride > max(8, end_incl - begin + 1) then only a single
+/// element is transferred.
+/// * seed == 0 then the sequence of operation is n = 0, 1, 2, .. N
+/// though multiple in flight transactions could alter this order.
+/// * seed == ~0u then the sequence is n = N, N-1, N-2, .. 0
+/// * seed anything else then sequence randomly pulls one off the front
+/// or the back of the range.
+///
+/// The random number generator R is defined as:
+inline uint32_t testengine_random(uint64_t* storage_)
+{
+ *storage_ = (
+ *storage_ * 0x0005deecE66Dull + 0xB
+ ) & 0xffffFFFFffffull;
+ uint32_t const t = (uint32_t)((*storage_ >> 17 /* NOTE */) & 0x7FFFffff);
+
+ //
+ // Construct the topmost bit by running the generator again and
+ // choosing a bit from somewhere
+ //
+ *storage_ = (
+ *storage_ * 0x0005deecE66Dull + 0xB
+ ) & 0xffffFFFFffffull;
+ uint32_t const ret = (uint32_t)(t | (*storage_ & 0x80000000ull));
+ return ret;
+}
+
+// Seeding storage from the 'seed' field is:
+inline void testengine_random_seed_storage(uint64_t* storage_, uint32_t seed_)
+{
+ *storage_ = ((uint64_t)seed_ << 16) | 0x330e;
+}
+
+
+/// 128 bytes
+struct user_frame_t
+{
+ // -- 0 --
+ uint32_t cmd;
+ uint32_t uctrl;
+
+ // -- 1 --
+ // These keep track of how much work is being done by the engine.
+ uint32_t count_of_transactions_launched;
+ uint32_t count_of_transactions_returned;
+
+ // -- 2 --
+ // If operating under PCIe then msiaddress should be either 1 (send MSI-X)
+ // or 0 (don't send). The MSI-X to send is in msidata.
+ uint64_t msiaddress;
+
+ // -- 3 --
+ // If operating under PCIe then msidata is the MSI-X index in the MSI-X
+ // vector table to send (0..2047)
+ //
+ // If operating under PCIe then msiattr has no effect.
+ uint32_t msidata;
+ uint32_t msiattr; // encoded same bottom half of attributes field
+
+ //
+ // source and destination attributes, including NS attributes if SSD-s
+ // Includes 'instruction' attributes so the work load can look like
+ // instruction accesses.
+ //
+ // Each halfword encodes:
+ // 15:14 shareability 0..2 (nsh/ish/osh) (ACE encoding), ignored if a device type
+ // 13 outer transient, ignored unless outer ACACHE is cacheable
+ // 12 inner transient, ignored unless inner ACACHE is cacheable
+ // 10:8 APROT (AMBA encoding)
+ // 10 InD -- Instruction not Data
+ // 9 NS -- Non-secure
+ // 8 PnU -- Privileged not User
+ // 7:4 ACACHE encoding of outer
+ // 3:0 if 7:4 == {0,1}
+ // // Device type
+ // 3 Gathering if ACACHE is 1, ignored otherwise
+ // 2 Reordering if ACACHE is 1, ignored otherwise
+ // else
+ // // Normal type
+ // ACACHE encoding of inner
+ //
+ // ACACHE encodings:
+ // 0000 -- Device-nGnRnE
+ // 0001 -- Device-(n)G(n)RE -- depending on bits [3:2]
+ // 0010 -- NC-NB (normal non-cacheable non-bufferable)
+ // 0011 -- NC
+ // 0100 -- illegal
+ // 0101 -- illegal
+ // 0110 -- raWT
+ // 0111 -- raWB
+ // 1000 -- illegal
+ // 1001 -- illegal
+ // 1010 -- waWT
+ // 1011 -- waWB
+ // 1100 -- illegal
+ // 1101 -- illegal
+ // 1110 -- rawaWT
+ // 1111 -- rawaWB
+ //
+ // NOTE that the meaning of the ACACHE encodings are dependent on if it is a
+ // read or a write. AMBA can't encode directly the 'no-allocate cacheable'
+ // and you have to set the 'other' allocation hint. So for example, a read
+ // naWB has to be encoded as waWB. A write naWB has to be encoded as raWB,
+ // etc.
+ //
+ // Lowest halfword are 'source' attributes.
+ // Highest halfword are 'destination' attributes.
+ //
+ // NOTE that you can make an non-secure stream output a secure transaction
+ // -- the SMMU should sort it out.
+ //
+
+ // -- 4 --
+ // Under PCIe then a real Function does not have control over the attributes
+ // of the transactions that it makes. However, for testing purposes of the
+ // SMMU then we allow its attributes to be specified (and magically
+ // transport them over PCIe).
+ uint32_t attributes;
+ uint32_t seed;
+
+ // -- 5 --
+ uint64_t begin;
+ // -- 6 --
+ uint64_t end_incl;
+
+ // -- 7 --
+ uint64_t stride;
+
+ // -- 8 --
+ uint64_t udata[8];
+};
+
+// 128 bytes
+struct privileged_frame_t
+{
+ // -- 0 --
+ uint32_t pctrl;
+ uint32_t downstream_port_index; // [0,64), under PCIe only use port 0
+
+ // -- 1 --
+ // Under PCIe, then streamid is ignored.
+ uint32_t streamid;
+ uint32_t substreamid; // ~0u means no substreamid, otherwise must be a 20 bit number or ENGINE_FRAME_MISCONFIGURED
+
+ // -- 2 --
+ uint64_t pdata[14];
+};
+
+// 128 KiB
+struct engine_pair_t
+{
+ user_frame_t user[ 64 * 1024 / sizeof(user_frame_t)];
+ privileged_frame_t privileged[ 64 * 1024 / sizeof(privileged_frame_t)];
+};
+
+//
+// NOTE that we don't have a command that does some writes then some reads. For
+// the ACK this is probably not going to be much of a problem.
+//
+// On completion, an MSI will be sent if the msiaddress != 0.
+//
+enum cmd_t
+{
+ // ORDER IS IMPORTANT, see predicates later in this file.
+
+ // The frame was misconfigured.
+ ENGINE_FRAME_MISCONFIGURED = ~0u - 1,
+
+ // The engine encountered an error (downstream transaction aborted).
+ ENGINE_ERROR = ~0u,
+
+ // This frame is unimplemented or in use by the secure world.
+ //
+ // A user _can_ write this to cmd and it will be considered to be
+ // ENGINE_HALTED.
+ ENGINE_NO_FRAME = 0,
+
+ // The engine is halted.
+ ENGINE_HALTED = 1,
+
+ // The engine memcpy's from region [begin, end_incl] to address udata[0].
+ //
+ // If stride is 0 then ENGINE_ERROR is produced, udata[2] contains the error
+ // address. No MSI is generated.
+ //
+ // If stride is 1 then this is a normal memcpy(). If stride is larger then
+ // not all the data will be copied.
+ //
+ // The order and size of the transactions used are determined randomly using
+ // seed. If seed is:
+ // 0 -- do them from lowest address to highest address
+ // ~0u -- do them in reverse order
+ // otherwise use the value as a seed to do them in random order
+ // The ability to do them in a non-random order means that we stand a
+ // chance of getting merged event records.
+ //
+ // This models a work-load where we start with some reads and then do some
+ // writes.
+ ENGINE_MEMCPY = 2,
+
+ // The engine randomizes region [begin, end_incl] using rand48, seeded
+ // with seed and using the specified stride.
+ //
+ // The order and size of the transactions used are determined randomly using
+ // seed.
+ //
+ // The seed is used to create a random number generator that is used to
+ // choose the direction.
+ //
+ // A separate random number generator per transaction is then used based on
+ // seed and the address:
+ //
+ // seed_per_transaction = seed ^ (address >> 32) ^ (address & 0xFFFFffff);
+ //
+ // This seed is then used to seed a random number generator to fill the
+ // required space. The data used should be:
+ // uint64_t storage;
+ // for (uint8_t* p = (uintptr_t)begin; p != (uintptr_t)end_incl; ++ p)
+ // {
+ // // When we cross a 4 KiB we reseed.
+ // if ((p & 0xFFF) == 0 || p == begin)
+ // {
+ // testengine_random_seed_storage(
+ // V ^ ((uintptr_t)p >> 32) ^ (uint32_t((uintptr_t)p))
+ // );
+ // }
+ // assert( *p == (uint8_t)testengine_random(&storage) );
+ // ++ p;
+ // }
+ // This isn't the most efficient way of doing it as it throws away a lot of
+ // entropy from the call to testengine_random() but then we aren't aiming for
+ // good random numbers.
+ //
+ // If stride is 0 then ENGINE_ERROR is produced, data[2] contains the error
+ // address. (NOTE that data[1] is not used).
+ //
+ // If stride is 1 then this fills the entire buffer. If stride is larger
+ // then not all the data will be randomized.
+ //
+ // This models a write-only work-load.
+ ENGINE_RAND48 = 3,
+
+ // The engine reads [begin, end_incl], treats the region as a set of
+ // uint64_t and sums them, delivering the result to udata[1], using the
+ // specified stride.
+ //
+ // If stride is 0 then ENGINE_ERROR is produced, udata[2] is the error
+ // address.
+ //
+ // If stride is 1 then this sums the entire buffer. If stride is larger
+ // then not all the data will be summed.
+ //
+ // The order and size of the transactions used are determined randomly using
+ // seed.
+ //
+ // The begin must be 64 bit aligned (begin & 7) == 0 and the end_incl must
+    // end at the end of a 64 bit quantity (end_incl & 7) == 7, otherwise
+ // ENGINE_FRAME_MISCONFIGURED is generated.
+ //
+ // This models a read-only work-load.
+ ENGINE_SUM64 = 4
+};
+
+static inline bool is_valid_and_running(cmd_t t_)
+{
+ unsigned const t = t_; // compensate for bad MSVC treating t_ as signed!
+ return ENGINE_MEMCPY <= t && t <= ENGINE_SUM64;
+}
+
+static inline bool is_in_error_state(cmd_t t_)
+{
+ return t_ == ENGINE_ERROR || t_ == ENGINE_FRAME_MISCONFIGURED;
+}
+
+static inline bool is_in_error_or_stopped_state(cmd_t t_)
+{
+ return t_ == ENGINE_NO_FRAME
+ || t_ == ENGINE_HALTED
+ || is_in_error_state(t_);
+}
+
+static inline bool is_invalid(cmd_t t_)
+{
+ unsigned const t = t_; // compensate for bad MSVC treating t_ as signed!
+ return ENGINE_SUM64 < t && t < ENGINE_FRAME_MISCONFIGURED;
+}
+
+/// pctrl has layout
+///
+/// 0 -- SSD_NS -- the stream and frame is non-secure
+/// -- note that if this is zero then it means the
+/// frame is controlled by secure SW and non-secure
+/// accesses are RAZ/WI (and so see ENGINE_NO_FRAME)
+/// Secure SW can only generate secure SSD StreamIDs
+/// This could be relaxed in the future if people need
+/// to.
+///
+/// 8 -- ATS_ENABLE -- CURRENTLY HAS NO EFFECT
+/// 9 -- PRI_ENABLE -- CURRENTLY HAS NO EFFECT
+///
+/// SSD_NS can only be altered by a secure access. Once clear then the
+/// corresponding user and privileged frames are accessible only to secure
+/// accesses. Non-secure accesses are RAZ/WI (and hence cmd will be
+/// ENGINE_NO_FRAME to non-secure accesses).
+///
+/// ATS_ENABLE/PRI_ENABLE are not currently implemented and their intent is for
+/// per-substreamid ATS/PRI support.
+///
+/// However, ATS/PRI support for the whole StreamID is advertised through the
+/// PCIe Extended Capabilities Header.
+///
+
+/// uctrl has layout
+///
+/// 0 -- MSI_ABORTED -- an MSI aborted (set by the engine)
+///
+/// 16-31 -- RATE -- some ill-defined metric for how fast to do the work!
+///
+
+#endif
diff --git a/spm/cactus/cactus_tests/cactus_message_loop.c b/spm/cactus/cactus_tests/cactus_message_loop.c
index e56e51e..c0abf2b 100644
--- a/spm/cactus/cactus_tests/cactus_message_loop.c
+++ b/spm/cactus/cactus_tests/cactus_message_loop.c
@@ -11,6 +11,7 @@
#include <ffa_helpers.h>
#include <events.h>
#include <platform.h>
+#include <spm_helpers.h>
/**
* Counter of the number of handled requests, for each CPU. The number of
@@ -43,9 +44,8 @@
{
uint64_t in_cmd;
- /* Get which core it is running from. */
- unsigned int core_pos = platform_get_core_pos(
- read_mpidr_el1() & MPID_MASK);
+ /* Get vCPU index for currently running vCPU. */
+ unsigned int core_pos = spm_get_my_core_pos();
if (cmd_args == NULL || ret == NULL) {
ERROR("Invalid arguments passed to %s!\n", __func__);
diff --git a/spm/cactus/cactus_tests/cactus_test_interrupts.c b/spm/cactus/cactus_tests/cactus_test_interrupts.c
index 4250445..2e0249c 100644
--- a/spm/cactus/cactus_tests/cactus_test_interrupts.c
+++ b/spm/cactus/cactus_tests/cactus_test_interrupts.c
@@ -86,8 +86,9 @@
/* Received FFA_INTERRUPT in blocked state. */
VERBOSE("Processing FFA_INTERRUPT while"
" blocked on direct response\n");
- unsigned int my_core_pos =
- platform_get_core_pos(read_mpidr_el1());
+
+ /* Get vCPU index for currently running vCPU. */
+ unsigned int my_core_pos = spm_get_my_core_pos();
ffa_ret = ffa_run(fwd_dest, my_core_pos);
} else {
diff --git a/spm/cactus/cactus_tests/cactus_test_memory_sharing.c b/spm/cactus/cactus_tests/cactus_test_memory_sharing.c
index 15ab0f1..8ea85c4 100644
--- a/spm/cactus/cactus_tests/cactus_test_memory_sharing.c
+++ b/spm/cactus/cactus_tests/cactus_test_memory_sharing.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -81,8 +81,16 @@
cactus_mem_send_get_retrv_flags(*args);
uint32_t words_to_write = cactus_mem_send_words_to_write(*args);
- expect(memory_retrieve(mb, &m, handle, source, vm_id,
- retrv_flags, mem_func), true);
+ struct ffa_memory_access receiver = ffa_memory_access_init(
+ vm_id, FFA_DATA_ACCESS_RW,
+ (mem_func == FFA_MEM_SHARE_SMC32)
+ ? FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED
+ : FFA_INSTRUCTION_ACCESS_NX,
+ 0, NULL);
+
+ expect(memory_retrieve(mb, &m, handle, source, &receiver, 1,
+ retrv_flags),
+ true);
composite = ffa_memory_region_get_composite(m, 0);
@@ -91,17 +99,15 @@
composite->constituents[0].page_count, PAGE_SIZE);
/* This test is only concerned with RW permissions. */
- if (ffa_get_data_access_attr(
- m->receivers[0].receiver_permissions.permissions) !=
- FFA_DATA_ACCESS_RW) {
+ if (m->receivers[0].receiver_permissions.permissions.data_access !=
+ FFA_DATA_ACCESS_RW) {
ERROR("Permissions not expected!\n");
return cactus_error_resp(vm_id, source, CACTUS_ERROR_TEST);
}
mem_attrs = MT_RW_DATA | MT_EXECUTE_NEVER;
- if (ffa_get_memory_security_attr(m->attributes) ==
- FFA_MEMORY_SECURITY_NON_SECURE) {
+ if (m->attributes.security == FFA_MEMORY_SECURITY_NON_SECURE) {
mem_attrs |= MT_NS;
}
@@ -182,7 +188,7 @@
{
struct ffa_value ffa_ret;
uint32_t mem_func = cactus_req_mem_send_get_mem_func(*args);
- ffa_id_t receiver = cactus_req_mem_send_get_receiver(*args);
+ ffa_id_t receiver_id = cactus_req_mem_send_get_receiver(*args);
ffa_memory_handle_t handle;
ffa_id_t vm_id = ffa_dir_msg_dest(*args);
ffa_id_t source = ffa_dir_msg_source(*args);
@@ -192,6 +198,10 @@
unsigned int mem_attrs;
int ret;
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(receiver_id,
+ mem_func);
+
VERBOSE("%x requested to send memory to %x (func: %x), page: %llx\n",
source, receiver, mem_func, (uint64_t)share_page_addr);
@@ -222,10 +232,9 @@
CACTUS_ERROR_TEST);
}
- handle = memory_init_and_send(
- (struct ffa_memory_region *)mb->send, PAGE_SIZE,
- vm_id, receiver, constituents,
- constituents_count, mem_func, &ffa_ret);
+ handle = memory_init_and_send(mb->send, PAGE_SIZE, vm_id, &receiver, 1,
+ constituents, constituents_count,
+ mem_func, &ffa_ret);
/*
* If returned an invalid handle, we should break the test.
@@ -236,8 +245,8 @@
ffa_error_code(ffa_ret));
}
- ffa_ret = cactus_mem_send_cmd(vm_id, receiver, mem_func, handle,
- 0, 10);
+ ffa_ret = cactus_mem_send_cmd(vm_id, receiver_id, mem_func, handle, 0,
+ 10);
if (!is_ffa_direct_response(ffa_ret)) {
return cactus_error_resp(vm_id, source, CACTUS_ERROR_FFA_CALL);
diff --git a/spm/cactus/cactus_tests/cactus_test_notifications.c b/spm/cactus/cactus_tests/cactus_test_notifications.c
index d8b88ed..6d7b41b 100644
--- a/spm/cactus/cactus_tests/cactus_test_notifications.c
+++ b/spm/cactus/cactus_tests/cactus_test_notifications.c
@@ -9,6 +9,7 @@
#include "sp_tests.h"
#include <ffa_helpers.h>
+#include <spm_helpers.h>
#include <debug.h>
/* Booleans to keep track of which CPUs handled NPI. */
@@ -33,9 +34,8 @@
void notification_pending_interrupt_handler(void)
{
- /* Get which core it is running from. */
- unsigned int core_pos = platform_get_core_pos(
- read_mpidr_el1() & MPID_MASK);
+ /* Get vCPU index for currently running vCPU. */
+ unsigned int core_pos = spm_get_my_core_pos();
VERBOSE("NPI handled in core %u\n", core_pos);
diff --git a/spm/cactus/cactus_tests/cactus_test_cpu_features.c b/spm/cactus/cactus_tests/cactus_test_simd.c
similarity index 85%
rename from spm/cactus/cactus_tests/cactus_test_cpu_features.c
rename to spm/cactus/cactus_tests/cactus_test_simd.c
index a1366d3..bcf1c38 100644
--- a/spm/cactus/cactus_tests/cactus_test_cpu_features.c
+++ b/spm/cactus/cactus_tests/cactus_test_simd.c
@@ -7,6 +7,7 @@
#include "cactus_message_loop.h"
#include "cactus_test_cmds.h"
#include <fpu.h>
+#include <spm_helpers.h>
#include "spm_common.h"
/*
@@ -21,7 +22,8 @@
*/
CACTUS_CMD_HANDLER(req_simd_fill, CACTUS_REQ_SIMD_FILL_CMD)
{
- core_pos = platform_get_core_pos(read_mpidr_el1());
+ /* Get vCPU index for currently running vCPU. */
+ core_pos = spm_get_my_core_pos();
fpu_state_write_rand(&sp_fpu_state_write);
return cactus_response(ffa_dir_msg_dest(*args),
ffa_dir_msg_source(*args),
@@ -36,7 +38,8 @@
{
bool test_succeed = false;
- unsigned int core_pos1 = platform_get_core_pos(read_mpidr_el1());
+ /* Get vCPU index for currently running vCPU. */
+ unsigned int core_pos1 = spm_get_my_core_pos();
if (core_pos1 == core_pos) {
fpu_state_read(&sp_fpu_state_read);
if (fpu_state_compare(&sp_fpu_state_write,
diff --git a/spm/cactus/cactus_tests/cactus_tests_smmuv3.c b/spm/cactus/cactus_tests/cactus_tests_smmuv3.c
index 3e6740b..5308d93 100644
--- a/spm/cactus/cactus_tests/cactus_tests_smmuv3.c
+++ b/spm/cactus/cactus_tests/cactus_tests_smmuv3.c
@@ -1,13 +1,14 @@
/*
- * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdint.h>
-#include <arch_helpers.h>
+#include <assert.h>
#include "cactus.h"
+#include <arch_helpers.h>
#include "cactus_message_loop.h"
#include <sp_platform_def.h>
#include "cactus_test_cmds.h"
@@ -19,63 +20,33 @@
#include "sp_tests.h"
#include <spm_common.h>
-/* Source and target address for memcopy operation */
-#define MEMCPY_SOURCE_BASE PLAT_CACTUS_MEMCPY_BASE
-#define MEMPCY_TOTAL_SIZE (PLAT_CACTUS_MEMCPY_RANGE / 2)
-#define MEMCPY_TARGET_BASE (MEMCPY_SOURCE_BASE + MEMPCY_TOTAL_SIZE)
-
/* Miscellaneous */
#define NO_SUBSTREAMID (0xFFFFFFFFU)
-#define TRANSFER_SIZE (MEMPCY_TOTAL_SIZE / FRAME_COUNT)
#define LOOP_COUNT (5000U)
-static bool run_smmuv3_test(void)
+static bool run_testengine(uint32_t operation, uintptr_t source_addr,
+ uintptr_t target_addr, size_t transfer_size,
+ uint32_t attributes)
{
- uint64_t source_addr, cpy_range, target_addr;
- uint64_t begin_addr, end_addr, dest_addr;
+ const uint32_t streamID_list[] = { 0U, 1U };
+ uintptr_t begin_addr;
+ uintptr_t end_addr;
+ uintptr_t dest_addr;
uint32_t status;
- unsigned int i, f, attempts;
+ uint32_t f;
+ uint32_t attempts;
- /*
- * The test engine's MEMCPY command copies data from the region in
- * range [begin, end_incl] to the region with base address as udata.
- * In this test, we configure the test engine to initiate memcpy from
- * scratch page located at MEMCPY_SOURCE_BASE to the page located at
- * address MEMCPY_TARGET_BASE
- */
-
- VERBOSE("CACTUS: Running SMMUv3 test\n");
-
- source_addr = MEMCPY_SOURCE_BASE;
- cpy_range = MEMPCY_TOTAL_SIZE;
- target_addr = MEMCPY_TARGET_BASE;
- uint32_t streamID_list[] = { 0U, 1U };
-
- uint64_t data[] = {
- ULL(0xBAADFEEDCEEBDAAF),
- ULL(0x0123456776543210)
- };
-
- /* Write pre-determined content to source pages */
- for (i = 0U; i < (cpy_range / 8U); i++) {
- mmio_write64_offset(source_addr, i * 8, data[i%2]);
- }
-
- /* Clean the data caches */
- clean_dcache_range(source_addr, cpy_range);
-
- /*
- * Make sure above load, store and cache maintenance instructions
- * complete before we start writing to TestEngine frame configuration
- * fields
- */
- dsbsy();
+ assert(operation == ENGINE_MEMCPY || operation == ENGINE_RAND48);
for (f = 0U; f < FRAME_COUNT; f++) {
- attempts = 0U;
- begin_addr = source_addr + (TRANSFER_SIZE * f);
- end_addr = begin_addr + TRANSFER_SIZE - 1U;
- dest_addr = target_addr + (TRANSFER_SIZE * f);
+ begin_addr = source_addr + (transfer_size * f);
+ end_addr = begin_addr + transfer_size - 1U;
+
+ if (operation == ENGINE_MEMCPY) {
+ dest_addr = target_addr + (transfer_size * f);
+ } else {
+ dest_addr = 0;
+ }
/* Initiate DMA sequence */
mmio_write32_offset(PRIV_BASE_FRAME + F_IDX(f), PCTRL_OFF, 0);
@@ -84,7 +55,12 @@
mmio_write32_offset(PRIV_BASE_FRAME + F_IDX(f), SUBSTREAM_ID_OFF, NO_SUBSTREAMID);
mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), UCTRL_OFF, 0);
- mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), SEED_OFF, 0);
+ mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), ATTR_OFF, attributes);
+
+ if (operation == ENGINE_RAND48) {
+ mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), SEED_OFF, (f + 1) * 42);
+ }
+
mmio_write64_offset(USR_BASE_FRAME + F_IDX(f), BEGIN_OFF, begin_addr);
mmio_write64_offset(USR_BASE_FRAME + F_IDX(f), END_CTRL_OFF, end_addr);
@@ -92,8 +68,8 @@
mmio_write64_offset(USR_BASE_FRAME + F_IDX(f), STRIDE_OFF, 1);
mmio_write64_offset(USR_BASE_FRAME + F_IDX(f), UDATA_OFF, dest_addr);
- mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), CMD_OFF, ENGINE_MEMCPY);
- VERBOSE("SMMUv3TestEngine: Waiting for MEMCPY completion for frame: %u\n", f);
+ mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), CMD_OFF, operation);
+ VERBOSE("SMMUv3TestEngine: waiting completion for frame: %u\n", f);
/*
* It is guaranteed that a read of "cmd" fields after writing to it will
@@ -101,17 +77,18 @@
* invalid.
*/
if (mmio_read32_offset(USR_BASE_FRAME + F_IDX(f), CMD_OFF) == ENGINE_MIS_CFG) {
- ERROR("SMMUv3TestEngine: Misconfigured for frame: %u\n", f);
+ ERROR("SMMUv3TestEngine: misconfigured for frame: %u\n", f);
return false;
}
- /* Wait for mem copy to be complete */
+ /* Wait for operation to be complete */
+ attempts = 0U;
while (attempts++ < LOOP_COUNT) {
status = mmio_read32_offset(USR_BASE_FRAME + F_IDX(f), CMD_OFF);
if (status == ENGINE_HALTED) {
break;
} else if (status == ENGINE_ERROR) {
- ERROR("SMMUv3: Test failed\n");
+ ERROR("SMMUv3: test failed, engine error.\n");
return false;
}
@@ -123,38 +100,70 @@
}
if (attempts == LOOP_COUNT) {
- ERROR("SMMUv3: Test failed\n");
+ ERROR("SMMUv3: test failed, exceeded max. wait loop.\n");
return false;
}
dsbsy();
}
- /*
- * Invalidate cached entries to force the CPU to fetch the data from
- * Main memory
- */
- inv_dcache_range(source_addr, cpy_range);
- inv_dcache_range(target_addr, cpy_range);
+ return true;
+}
- /* Compare source and destination memory locations for data */
- for (i = 0U; i < (cpy_range / 8U); i++) {
- if (mmio_read_64(source_addr + 8 * i) != mmio_read_64(target_addr + 8 * i)) {
- ERROR("SMMUv3: Mem copy failed: %llx\n", target_addr + 8 * i);
- return false;
+static bool run_smmuv3_memcpy(uintptr_t start_address, size_t size, uint32_t attributes)
+{
+ uintptr_t target_address;
+ size_t cpy_range = size >> 1;
+ bool ret;
+
+ /*
+ * The test engine's MEMCPY command copies data from the region in
+ * range [begin, end_incl] to the region with base address as udata.
+ * In this test, we configure the test engine to initiate memcpy from
+ * scratch page located at MEMCPY_SOURCE_BASE to the page located at
+ * address MEMCPY_TARGET_BASE
+ */
+
+ target_address = start_address + cpy_range;
+ ret = run_testengine(ENGINE_MEMCPY, start_address, target_address,
+ cpy_range / FRAME_COUNT, attributes);
+
+ if (ret) {
+ /*
+ * Invalidate cached entries to force the CPU to fetch the data from
+ * Main memory
+ */
+ inv_dcache_range(start_address, cpy_range);
+ inv_dcache_range(target_address, cpy_range);
+
+ /* Compare source and destination memory locations for data */
+ for (size_t i = 0U; i < (cpy_range / 8U); i++) {
+ if (mmio_read_64(start_address + 8 * i) !=
+ mmio_read_64(target_address + 8 * i)) {
+ ERROR("SMMUv3: Mem copy failed: %lx\n", target_address + 8 * i);
+ return false;
+ }
}
}
- return true;
+ return ret;
+}
+
+static bool run_smmuv3_rand48(uintptr_t start_address, size_t size, uint32_t attributes)
+{
+ return run_testengine(ENGINE_RAND48, start_address, 0, size / FRAME_COUNT, attributes);
}
CACTUS_CMD_HANDLER(smmuv3_cmd, CACTUS_DMA_SMMUv3_CMD)
{
- struct ffa_value ffa_ret;
ffa_id_t vm_id = ffa_dir_msg_dest(*args);
ffa_id_t source = ffa_dir_msg_source(*args);
+ uint32_t operation = args->arg4;
+ uintptr_t start_address = args->arg5;
+ size_t size = args->arg6;
+ uint32_t attributes = args->arg7;
- VERBOSE("Received request through direct message for DMA service\n");
+ VERBOSE("Received request through direct message for DMA service.\n");
/*
* At present, the test cannot be run concurrently on multiple SPs as
@@ -165,11 +174,21 @@
return cactus_error_resp(vm_id, source, 0);
}
- if (run_smmuv3_test()) {
- ffa_ret = cactus_success_resp(vm_id, source, 0);
- } else {
- ffa_ret = cactus_error_resp(vm_id, source, 0);
+ switch (operation) {
+ case ENGINE_MEMCPY:
+ if (run_smmuv3_memcpy(start_address, size, attributes)) {
+ return cactus_success_resp(vm_id, source, 0);
+ }
+ break;
+ case ENGINE_RAND48:
+ if (run_smmuv3_rand48(start_address, size, attributes)) {
+ return cactus_success_resp(vm_id, source, 0);
+ }
+ break;
+ default:
+ ERROR("SMMUv3TestEngine: unsupported operation (%u).\n", operation);
+ break;
}
- return ffa_ret;
+ return cactus_error_resp(vm_id, source, 0);
}
diff --git a/spm/cactus/cactus_tests/smmuv3_test_engine.h b/spm/cactus/cactus_tests/smmuv3_test_engine.h
index 32d86ac..d3a3dcf 100644
--- a/spm/cactus/cactus_tests/smmuv3_test_engine.h
+++ b/spm/cactus/cactus_tests/smmuv3_test_engine.h
@@ -32,6 +32,7 @@
/* Offset of various control fields belonging to User Frame */
#define CMD_OFF (0x0U)
#define UCTRL_OFF (0x4U)
+#define ATTR_OFF (0x20U)
#define SEED_OFF (0x24U)
#define BEGIN_OFF (0x28U)
#define END_CTRL_OFF (0x30U)
diff --git a/spm/cactus/plat/arm/fvp/fdts/cactus.dts b/spm/cactus/plat/arm/fvp/fdts/cactus.dts
index 78c5d97..12d7b84 100644
--- a/spm/cactus/plat/arm/fvp/fdts/cactus.dts
+++ b/spm/cactus/plat/arm/fvp/fdts/cactus.dts
@@ -88,6 +88,13 @@
base-address = <0x00000000 0x7404000>;
attributes = <0x3>; /* read-write */
};
+
+ smmuv3-ns-region {
+ description = "smmuv3-ns-region";
+ pages-count = <8>;
+ base-address = <0x0 0x90000000>;
+ attributes = <0xb>; /* ns-read-write */
+ };
};
device-regions {
diff --git a/spm/cactus/plat/arm/fvp/include/sp_platform_def.h b/spm/cactus/plat/arm/fvp/include/sp_platform_def.h
index 0b23189..0025dce 100644
--- a/spm/cactus/plat/arm/fvp/include/sp_platform_def.h
+++ b/spm/cactus/plat/arm/fvp/include/sp_platform_def.h
@@ -25,8 +25,9 @@
#define CACTUS_PL011_UART_CLK_IN_HZ PL011_UART2_CLK_IN_HZ
/* Scratch memory used for SMMUv3 driver testing purposes in Cactus SP */
-#define PLAT_CACTUS_MEMCPY_BASE ULL(0x7400000)
-#define PLAT_CACTUS_MEMCPY_RANGE ULL(0x8000)
+#define PLAT_CACTUS_MEMCPY_BASE ULL(0x7400000)
+#define PLAT_CACTUS_NS_MEMCPY_BASE ULL(0x90000000)
+#define PLAT_CACTUS_MEMCPY_RANGE ULL(0x8000)
/* Base address of user and PRIV frames in SMMUv3TestEngine */
#define USR_BASE_FRAME ULL(0x2BFE0000)
diff --git a/spm/cactus/plat/arm/tc/include/sp_platform_def.h b/spm/cactus/plat/arm/tc/include/sp_platform_def.h
index 4a86127..c5b548d 100644
--- a/spm/cactus/plat/arm/tc/include/sp_platform_def.h
+++ b/spm/cactus/plat/arm/tc/include/sp_platform_def.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -27,6 +27,7 @@
/* Scratch memory used for SMMUv3 driver testing purposes in Cactus SP */
/* SMMUv3 tests are disabled for TC platform */
#define PLAT_CACTUS_MEMCPY_BASE ULL(0xfe400000)
+#define PLAT_CACTUS_NS_MEMCPY_BASE ULL(0x90000000)
#define PLAT_CACTUS_MEMCPY_RANGE ULL(0x8000)
/* Base address of user and PRIV frames in SMMUv3TestEngine */
diff --git a/spm/common/sp_tests/sp_test_ffa.c b/spm/common/sp_tests/sp_test_ffa.c
index 73db187..219b149 100644
--- a/spm/common/sp_tests/sp_test_ffa.c
+++ b/spm/common/sp_tests/sp_test_ffa.c
@@ -17,7 +17,7 @@
/* FFA version test helpers */
#define FFA_MAJOR 1U
-#define FFA_MINOR 1U
+#define FFA_MINOR 2U
static uint32_t spm_version;
@@ -133,11 +133,11 @@
struct ffa_value ret = { 0 };
VERBOSE("FF-A Partition Info regs interface tests\n");
- ret = ffa_version(MAKE_FFA_VERSION(1, 1));
+ ret = ffa_version(MAKE_FFA_VERSION(1, 2));
uint32_t version = ret.fid;
if (version == FFA_ERROR_NOT_SUPPORTED) {
- ERROR("FFA_VERSION 1.1 not supported, skipping"
+ ERROR("FFA_VERSION 1.2 not supported, skipping"
" FFA_PARTITION_INFO_GET_REGS test.\n");
return;
}
diff --git a/spm/common/spm_helpers.c b/spm/common/spm_helpers.c
index 1cb5f4d..b2a4709 100644
--- a/spm/common/spm_helpers.c
+++ b/spm/common/spm_helpers.c
@@ -56,3 +56,16 @@
return (int64_t)ret.ret0;
}
+
+/**
+ * Return vCPU index for the currently running vCPU.
+ * Virtual MPIDR holds the linear vCPU index information in lower bits.
+ * Keep only first 24 bits (mapping to Aff0/Aff1/Aff2).
+ * Omit Aff3, bit [31], U[30], MT[24].
+ */
+unsigned int spm_get_my_core_pos(void)
+{
+ uint64_t mpidr = read_mpidr_el1();
+
+ return (unsigned int)(mpidr & 0xffffff);
+}
diff --git a/spm/common/spm_helpers.h b/spm/common/spm_helpers.h
index 1d3ddc2..59cdaf1 100644
--- a/spm/common/spm_helpers.h
+++ b/spm/common/spm_helpers.h
@@ -23,4 +23,6 @@
int64_t spm_interrupt_enable(uint32_t int_id, bool enable, enum interrupt_pin pin);
int64_t spm_interrupt_deactivate(uint32_t vint_id);
+unsigned int spm_get_my_core_pos(void);
+
#endif /* SPMC_H */
diff --git a/tftf/tests/extensions/mpam/test_mpam.c b/tftf/tests/extensions/mpam/test_mpam.c
new file mode 100644
index 0000000..eb40bc5
--- /dev/null
+++ b/tftf/tests/extensions/mpam/test_mpam.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+/* EL3 is expected to allow access to MPAM system registers from EL2.
+ * Reading these registers will trap to EL3 and crash when EL3 has not
+ * allowed access.
+ */
+
+test_result_t test_mpam_reg_access(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_MPAM_NOT_SUPPORTED();
+
+ read_mpamidr_el1();
+ read_mpam2_el2();
+
+ return TEST_RESULT_SUCCESS;
+#endif
+}
diff --git a/tftf/tests/misc_tests/test_invalid_access.c b/tftf/tests/misc_tests/test_invalid_access.c
index 343a553..3baeed5 100644
--- a/tftf/tests/misc_tests/test_invalid_access.c
+++ b/tftf/tests/misc_tests/test_invalid_access.c
@@ -1,9 +1,10 @@
/*
- * Copyright (c) 2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include "ffa_helpers.h"
#include <plat/common/platform.h>
#include <arch.h>
@@ -293,18 +294,21 @@
struct mailbox_buffers mb;
struct ffa_value ret;
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(
+ RECEIVER, FFA_MEM_SHARE_SMC32);
+
if (get_armv9_2_feat_rme_support() == 0U) {
return TEST_RESULT_SKIPPED;
}
- CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
GET_TFTF_MAILBOX(mb);
- handle = memory_init_and_send((struct ffa_memory_region *)mb.send,
- PAGE_SIZE, SENDER, RECEIVER,
- constituents, constituents_count,
- FFA_MEM_SHARE_SMC32, &ret);
+ handle = memory_init_and_send(mb.send, PAGE_SIZE, SENDER, &receiver, 1,
+ constituents, constituents_count,
+ FFA_MEM_SHARE_SMC32, &ret);
if (handle == FFA_MEMORY_HANDLE_INVALID) {
return TEST_RESULT_SUCCESS;
diff --git a/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c b/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c
index b840beb..d308784 100644
--- a/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c
+++ b/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c
@@ -278,3 +278,269 @@
return host_cmp_result();
}
+
+
+static test_result_t cpu_on_handler(void)
+{
+ bool ret;
+ struct rmi_rec_run *run;
+ unsigned int i;
+
+ spin_lock(&secondary_cpu_lock);
+ i = ++is_secondary_cpu_on;
+ spin_unlock(&secondary_cpu_lock);
+ ret = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+ RMI_EXIT_PSCI, i);
+ if (ret) {
+ run = (struct rmi_rec_run *)realm.run[i];
+ if (run->exit.gprs[0] == SMC_PSCI_CPU_OFF) {
+ return TEST_RESULT_SUCCESS;
+ }
+ }
+ ERROR("Rec %d failed\n", i);
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * The test creates a realm with MAX recs
+ * On receiving PSCI_CPU_ON call from REC0 for all other recs,
+ * the test completes the PSCI call and re-enters REC0.
+ * Turn ON secondary CPUs up to a max of MAX_REC_COUNT.
+ * Each of the secondary then enters Realm with a different REC
+ * and executes the test REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD in Realm payload.
+ * It is expected that the REC will exit with PSCI_CPU_OFF as the exit reason.
+ * REC0 checks if all other CPUs are off, via PSCI_AFFINITY_INFO.
+ * Host completes the PSCI requests.
+ */
+test_result_t host_realm_multi_rec_multiple_cpu(void)
+{
+ bool ret1, ret2;
+ test_result_t ret3 = TEST_RESULT_FAIL;
+ int ret = RMI_ERROR_INPUT;
+ u_register_t rec_num;
+ u_register_t other_mpidr, my_mpidr;
+ struct rmi_rec_run *run;
+ unsigned int host_call_result, i = 0U;
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+ RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+ RMI_NOT_RUNNABLE};
+ u_register_t exit_reason;
+ int cpu_node;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(MAX_REC_COUNT);
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, MAX_REC_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ is_secondary_cpu_on = 0U;
+ init_spinlock(&secondary_cpu_lock);
+ my_mpidr = read_mpidr_el1() & MPID_MASK;
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, MAX_REC_COUNT);
+ ret1 = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+ RMI_EXIT_PSCI, 0U);
+ if (!ret1) {
+ ERROR("Host did not receive CPU ON request\n");
+ goto destroy_realm;
+ }
+ while (true) {
+ run = (struct rmi_rec_run *)realm.run[0];
+ if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+ ERROR("Host did not receive CPU ON request\n");
+ goto destroy_realm;
+ }
+ rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+ if (rec_num >= MAX_REC_COUNT) {
+ ERROR("Invalid mpidr requested\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+ (unsigned long)PSCI_E_SUCCESS);
+ if (ret == RMI_SUCCESS) {
+ /* Re-enter REC0 complete CPU_ON */
+ ret = host_realm_rec_enter(&realm, &exit_reason,
+ &host_call_result, 0U);
+ if (ret != RMI_SUCCESS || exit_reason != RMI_EXIT_PSCI) {
+ break;
+ }
+ } else {
+ ERROR("host_rmi_psci_complete failed\n");
+ goto destroy_realm;
+ }
+ }
+ if (exit_reason != RMI_EXIT_HOST_CALL || host_call_result != TEST_RESULT_SUCCESS) {
+ ERROR("Realm failed\n");
+ goto destroy_realm;
+ }
+
+ /* Turn on all CPUs */
+ for_each_cpu(cpu_node) {
+ if (i == (MAX_REC_COUNT - 1U)) {
+ break;
+ }
+ other_mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (other_mpidr == my_mpidr) {
+ continue;
+ }
+
+ /* Power on the other CPU */
+ ret = tftf_try_cpu_on(other_mpidr, (uintptr_t)cpu_on_handler, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("TFTF CPU ON failed\n");
+ goto destroy_realm;
+ }
+ i++;
+ }
+
+ while (true) {
+ /* Re-enter REC0 complete PSCI_AFFINITY_INFO */
+ ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("Rec0 re-enter failed\n");
+ goto destroy_realm;
+ }
+ if (run->exit.gprs[0] != SMC_PSCI_AFFINITY_INFO_AARCH64) {
+ break;
+ }
+ rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+ if (rec_num >= MAX_REC_COUNT) {
+ ERROR("Invalid mpidr requested\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+ (unsigned long)PSCI_E_SUCCESS);
+
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_psci_complete failed\n");
+ goto destroy_realm;
+ }
+ }
+
+ if (ret == RMI_SUCCESS && exit_reason == RMI_EXIT_HOST_CALL) {
+ ret3 = host_call_result;
+ }
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if ((ret != RMI_SUCCESS) || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return ret3;
+}
+
+/*
+ * Test creates 2 realms with multiple recs
+ * realm1, rec1 requests CPU_ON for rec2
+ * Host calls PSCI_COMPLETE with wrong rec3, checks for error
+ * Host calls PSCI_COMPLETE with wrong rec from different realm, checks for error
+ * Host calls PSCI_COMPLETE with correct rec, checks for success
+ * Host attempts to execute rec which is NOT_RUNNABLE, checks for error
+ */
+test_result_t host_realm_multi_rec_multiple_cpu2(void)
+{
+ bool ret1, ret2;
+ test_result_t ret3 = TEST_RESULT_FAIL;
+ int ret = RMI_ERROR_INPUT;
+ u_register_t rec_num;
+ struct rmi_rec_run *run;
+ unsigned int host_call_result;
+ struct realm realm2;
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+ RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+ RMI_NOT_RUNNABLE};
+ u_register_t exit_reason;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, MAX_REC_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_create_activate_realm_payload(&realm2, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE + PAGE_POOL_MAX_SIZE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ ret2 = host_destroy_realm(&realm);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ /* Realm to request CPU_ON for rec 2 */
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, 2U);
+ ret1 = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+ RMI_EXIT_PSCI, 0U);
+ if (!ret1) {
+ ERROR("Host did not receive CPU ON request\n");
+ goto destroy_realm;
+ }
+ run = (struct rmi_rec_run *)realm.run[0];
+ if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+ ERROR("Host2 did not receive CPU ON request\n");
+ goto destroy_realm;
+ }
+ rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+ if (rec_num >= MAX_REC_COUNT) {
+ ERROR("Invalid mpidr requested\n");
+ goto destroy_realm;
+ }
+
+ /* pass wrong target_rec, expect error */
+ ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num + 1U],
+ (unsigned long)PSCI_E_SUCCESS);
+ if (ret == RMI_SUCCESS) {
+ ERROR("host_rmi_psci_complete wrong target_rec didn't fail ret=%x\n",
+ ret);
+ goto destroy_realm;
+ }
+
+ /* pass wrong target_rec from different realm, expect error */
+ ret = host_rmi_psci_complete(realm.rec[0], realm2.rec[0U],
+ (unsigned long)PSCI_E_SUCCESS);
+ if (ret == RMI_SUCCESS) {
+ ERROR("host_rmi_psci_complete wrong target_rec didn't fail ret=%x\n",
+ ret);
+ goto destroy_realm;
+ }
+
+ ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+ (unsigned long)PSCI_E_SUCCESS);
+
+ /* Try to run Rec3(CPU OFF/NOT_RUNNABLE), expect error */
+ ret = host_realm_rec_enter(&realm, &exit_reason,
+ &host_call_result, 3U);
+
+ if (ret == RMI_SUCCESS) {
+ ERROR("Expected error\n");
+ goto destroy_realm;
+ }
+ ret3 = TEST_RESULT_SUCCESS;
+
+destroy_realm:
+ ret1 = host_destroy_realm(&realm);
+ ret2 = host_destroy_realm(&realm2);
+
+ if (!ret1 || !ret2) {
+ ERROR("%s(): failed destroy=%d, %d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+ return ret3;
+}
diff --git a/tftf/tests/runtime_services/secure_service/ffa_helpers.c b/tftf/tests/runtime_services/secure_service/ffa_helpers.c
index 9547c07..8b53bb0 100644
--- a/tftf/tests/runtime_services/secure_service/ffa_helpers.c
+++ b/tftf/tests/runtime_services/secure_service/ffa_helpers.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -142,16 +142,12 @@
return ffa_service_call(&args);
}
-
-/**
- * Initialises the header of the given `ffa_memory_region`, not including the
- * composite memory region offset.
- */
-static void ffa_memory_region_init_header(
- struct ffa_memory_region *memory_region, ffa_id_t sender,
- ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
- ffa_memory_handle_t handle, uint32_t tag, ffa_id_t receiver,
- ffa_memory_access_permissions_t permissions)
+void ffa_memory_region_init_header(struct ffa_memory_region *memory_region,
+ ffa_id_t sender,
+ ffa_memory_attributes_t attributes,
+ ffa_memory_region_flags_t flags,
+ ffa_memory_handle_t handle, uint32_t tag,
+ uint32_t receiver_count)
{
memory_region->sender = sender;
memory_region->attributes = attributes;
@@ -160,67 +156,47 @@
memory_region->tag = tag;
memory_region->memory_access_desc_size =
sizeof(struct ffa_memory_access);
- memory_region->receiver_count = 1;
- memory_region->receivers[0].receiver_permissions.receiver = receiver;
- memory_region->receivers[0].receiver_permissions.permissions =
- permissions;
- memory_region->receivers[0].receiver_permissions.flags = 0;
- memory_region->receivers[0].reserved_0 = 0;
- /* Receivers at the end of the `ffa_memory_region` structure. */
- memory_region->receivers_offset = sizeof(struct ffa_memory_region);
+ memory_region->receiver_count = receiver_count;
+ memory_region->receivers_offset =
+ offsetof(struct ffa_memory_region, receivers);
memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
}
/**
- * Initialises the given `ffa_memory_region` and copies as many as possible of
- * the given constituents to it.
+ * Copies as many as possible of the given constituents to the respective
+ * memory region and sets the respective offset.
*
* Returns the number of constituents remaining which wouldn't fit, and (via
* return parameters) the size in bytes of the first fragment of data copied to
* `memory_region` (attributes, constituents and memory region header size), and
* the total size of the memory sharing message including all constituents.
*/
-uint32_t ffa_memory_region_init(
+static uint32_t ffa_memory_region_init_constituents(
struct ffa_memory_region *memory_region, size_t memory_region_max_size,
- ffa_id_t sender, ffa_id_t receiver,
const struct ffa_memory_region_constituent constituents[],
- uint32_t constituent_count, uint32_t tag,
- ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
- enum ffa_instruction_access instruction_access,
- enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
- enum ffa_memory_shareability shareability, uint32_t *total_length,
+ uint32_t constituent_count, uint32_t *total_length,
uint32_t *fragment_length)
{
- ffa_memory_access_permissions_t permissions = 0;
- ffa_memory_attributes_t attributes = 0;
struct ffa_composite_memory_region *composite_memory_region;
uint32_t fragment_max_constituents;
- uint32_t count_to_copy;
- uint32_t i;
uint32_t constituents_offset;
+ uint32_t count_to_copy;
- /* Set memory region's permissions. */
- ffa_set_data_access_attr(&permissions, data_access);
- ffa_set_instruction_access_attr(&permissions, instruction_access);
-
- /* Set memory region's page attributes. */
- ffa_set_memory_type_attr(&attributes, type);
- ffa_set_memory_cacheability_attr(&attributes, cacheability);
- ffa_set_memory_shareability_attr(&attributes, shareability);
-
- ffa_memory_region_init_header(memory_region, sender, attributes, flags,
- 0, tag, receiver, permissions);
/*
* Note that `sizeof(struct_ffa_memory_region)` and `sizeof(struct
* ffa_memory_access)` must both be multiples of 16 (as verified by the
* asserts in `ffa_memory.c`, so it is guaranteed that the offset we
* calculate here is aligned to a 64-bit boundary and so 64-bit values
* can be copied without alignment faults.
+ * If there are multiple receiver endpoints, their respective access
+ * structure should point to the same offset value.
*/
- memory_region->receivers[0].composite_memory_region_offset =
- sizeof(struct ffa_memory_region) +
- memory_region->receiver_count *
- sizeof(struct ffa_memory_access);
+ for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
+ memory_region->receivers[i].composite_memory_region_offset =
+ sizeof(struct ffa_memory_region) +
+ memory_region->receiver_count *
+ sizeof(struct ffa_memory_access);
+ }
composite_memory_region =
ffa_memory_region_get_composite(memory_region, 0);
@@ -240,7 +216,7 @@
count_to_copy = fragment_max_constituents;
}
- for (i = 0; i < constituent_count; ++i) {
+ for (uint32_t i = 0; i < constituent_count; ++i) {
if (i < count_to_copy) {
composite_memory_region->constituents[i] =
constituents[i];
@@ -268,43 +244,117 @@
/**
* Initialises the given `ffa_memory_region` to be used for an
* `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
+ * Initialises the given `ffa_memory_region` and copies as many as possible of
+ * the given constituents to it.
+ *
+ * Returns the number of constituents remaining which wouldn't fit, and (via
+ * return parameters) the size in bytes of the first fragment of data copied to
+ * `memory_region` (attributes, constituents and memory region header size), and
+ * the total size of the memory sharing message including all constituents.
+ */
+uint32_t ffa_memory_region_init(
+ struct ffa_memory_region *memory_region, size_t memory_region_max_size,
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t tag,
+ ffa_memory_region_flags_t flags, enum ffa_memory_type type,
+ enum ffa_memory_cacheability cacheability,
+ enum ffa_memory_shareability shareability, uint32_t *total_length,
+ uint32_t *fragment_length)
+{
+ ffa_memory_attributes_t attributes = {
+ .type = type,
+ .cacheability = cacheability,
+ .shareability = shareability,
+ };
+
+ ffa_memory_region_init_header(memory_region, sender, attributes, flags,
+ 0, tag, receiver_count);
+
+ memcpy(memory_region->receivers, receivers,
+ receiver_count * sizeof(struct ffa_memory_access));
+
+ return ffa_memory_region_init_constituents(
+ memory_region, memory_region_max_size, constituents,
+ constituent_count, total_length, fragment_length);
+}
+
+uint32_t ffa_memory_fragment_init(
+ struct ffa_memory_region_constituent *fragment,
+ size_t fragment_max_size,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t *fragment_length)
+{
+ const uint32_t fragment_max_constituents =
+ fragment_max_size /
+ sizeof(struct ffa_memory_region_constituent);
+
+ uint32_t count_to_copy =
+ MIN(constituent_count, fragment_max_constituents);
+
+ for (uint32_t i = 0; i < count_to_copy; ++i) {
+ fragment[i] = constituents[i];
+ }
+
+ if (fragment_length != NULL) {
+ *fragment_length = count_to_copy *
+ sizeof(struct ffa_memory_region_constituent);
+ }
+
+ return constituent_count - count_to_copy;
+}
+
+/**
+ * Initialises the given `ffa_memory_region` to be used for an
+ * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
*
* Returns the size of the message written.
*/
uint32_t ffa_memory_retrieve_request_init(
struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
- ffa_id_t sender, ffa_id_t receiver, uint32_t tag,
- ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
- enum ffa_instruction_access instruction_access,
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count, uint32_t tag, ffa_memory_region_flags_t flags,
enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
enum ffa_memory_shareability shareability)
{
- ffa_memory_access_permissions_t permissions = 0;
- ffa_memory_attributes_t attributes = 0;
-
- /* Set memory region's permissions. */
- ffa_set_data_access_attr(&permissions, data_access);
- ffa_set_instruction_access_attr(&permissions, instruction_access);
-
- /* Set memory region's page attributes. */
- ffa_set_memory_type_attr(&attributes, type);
- ffa_set_memory_cacheability_attr(&attributes, cacheability);
- ffa_set_memory_shareability_attr(&attributes, shareability);
+ ffa_memory_attributes_t attributes = {
+ .type = type,
+ .cacheability = cacheability,
+ .shareability = shareability,
+ };
ffa_memory_region_init_header(memory_region, sender, attributes, flags,
- handle, tag, receiver, permissions);
+ handle, tag, receiver_count);
+
+ memcpy(memory_region->receivers, receivers,
+ receiver_count * sizeof(struct ffa_memory_access));
+
/*
* Offset 0 in this case means that the hypervisor should allocate the
* address ranges. This is the only configuration supported by Hafnium,
* as it enforces 1:1 mappings in the stage 2 page tables.
*/
- memory_region->receivers[0].composite_memory_region_offset = 0;
- memory_region->receivers[0].reserved_0 = 0;
+ for (uint32_t i = 0; i < receiver_count; i++) {
+ memory_region->receivers[i].composite_memory_region_offset = 0;
+ memory_region->receivers[i].reserved_0 = 0;
+ }
return sizeof(struct ffa_memory_region) +
memory_region->receiver_count * sizeof(struct ffa_memory_access);
}
+/**
+ * Configure `region` for a hypervisor retrieve request - i.e. all fields except
+ * `handle` are initialized to 0.
+ */
+void ffa_hypervisor_retrieve_request_init(struct ffa_memory_region *region,
+ ffa_memory_handle_t handle)
+{
+ memset(region, 0, sizeof(struct ffa_memory_region));
+ region->handle = handle;
+}
+
/*
* FFA Version ABI helper.
* Version fields:
@@ -545,6 +595,34 @@
return ffa_service_call(&args);
}
+struct ffa_value ffa_mem_frag_rx(ffa_memory_handle_t handle,
+ uint32_t fragment_offset)
+{
+ /* Note that sender MBZ at virtual instance. */
+ struct ffa_value args = {
+ .fid = FFA_MEM_FRAG_RX,
+ .arg1 = (uint32_t)handle,
+ .arg2 = (uint32_t)(handle >> 32),
+ .arg3 = fragment_offset,
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_mem_frag_tx(ffa_memory_handle_t handle,
+ uint32_t fragment_length)
+{
+ struct ffa_value args = {
+ .fid = FFA_MEM_FRAG_TX,
+ .arg1 = (uint32_t)handle,
+ .arg2 = (uint32_t)(handle >> 32),
+ .arg3 = fragment_length,
+ };
+
+ /* Note that sender MBZ at virtual instance. */
+ return ffa_service_call(&args);
+}
+
/** Create Notifications Bitmap for the given VM */
struct ffa_value ffa_notification_bitmap_create(ffa_id_t vm_id,
ffa_vcpu_count_t vcpu_count)
@@ -705,3 +783,26 @@
return ffa_service_call(&args);
}
+
+/**
+ * Initializes receiver permissions in a memory transaction descriptor.
+ */
+struct ffa_memory_access ffa_memory_access_init(
+ ffa_id_t receiver_id, enum ffa_data_access data_access,
+ enum ffa_instruction_access instruction_access,
+ ffa_memory_receiver_flags_t flags,
+ struct ffa_memory_access_impdef *impdef)
+{
+ struct ffa_memory_access access;
+ access.reserved_0 = 0;
+ access.composite_memory_region_offset = 0;
+ access.receiver_permissions.flags = flags;
+ access.receiver_permissions.receiver = receiver_id;
+ access.receiver_permissions.permissions.data_access = data_access;
+ access.receiver_permissions.permissions.instruction_access =
+ instruction_access;
+ access.impdef = impdef != NULL ? *impdef :
+ (struct ffa_memory_access_impdef){{0, 0}};
+
+ return access;
+}
diff --git a/tftf/tests/runtime_services/secure_service/spm_common.c b/tftf/tests/runtime_services/secure_service/spm_common.c
index 2e6d257..d6bbda5 100644
--- a/tftf/tests/runtime_services/secure_service/spm_common.c
+++ b/tftf/tests/runtime_services/secure_service/spm_common.c
@@ -4,6 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include "stdint.h"
+
#include "ffa_helpers.h"
#include <cactus_test_cmds.h>
#include <debug.h>
@@ -208,18 +210,13 @@
bool memory_retrieve(struct mailbox_buffers *mb,
struct ffa_memory_region **retrieved, uint64_t handle,
- ffa_id_t sender, ffa_id_t receiver,
- ffa_memory_region_flags_t flags,
- uint32_t mem_func)
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count, ffa_memory_region_flags_t flags)
{
struct ffa_value ret;
uint32_t fragment_size;
uint32_t total_size;
uint32_t descriptor_size;
- const enum ffa_instruction_access inst_access =
- (mem_func == FFA_MEM_SHARE_SMC32)
- ? FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED
- : FFA_INSTRUCTION_ACCESS_NX;
if (retrieved == NULL || mb == NULL) {
ERROR("Invalid parameters!\n");
@@ -227,18 +224,15 @@
}
descriptor_size = ffa_memory_retrieve_request_init(
- mb->send, handle, sender, receiver, 0, flags,
- FFA_DATA_ACCESS_RW,
- inst_access,
- FFA_MEMORY_NORMAL_MEM,
- FFA_MEMORY_CACHE_WRITE_BACK,
- FFA_MEMORY_INNER_SHAREABLE);
+ mb->send, handle, sender, receivers, receiver_count, 0, flags,
+ FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+ FFA_MEMORY_INNER_SHAREABLE);
ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
if (ffa_func_id(ret) != FFA_MEM_RETRIEVE_RESP) {
- ERROR("Couldn't retrieve the memory page. Error: %x\n",
- ffa_error_code(ret));
+ ERROR("%s: couldn't retrieve the memory page. Error: %d\n",
+ __func__, ffa_error_code(ret));
return false;
}
@@ -277,6 +271,144 @@
return true;
}
+bool hypervisor_retrieve_request(struct mailbox_buffers *mb, uint64_t handle,
+ void *out, uint32_t out_size)
+{
+ struct ffa_value ret;
+ uint32_t total_size;
+ uint32_t fragment_size;
+ uint32_t fragment_offset;
+ struct ffa_memory_region *region_out = out;
+
+ if (out == NULL || mb == NULL) {
+ ERROR("Invalid parameters!\n");
+ return false;
+ }
+
+ ffa_hypervisor_retrieve_request_init(mb->send, handle);
+ ret = ffa_mem_retrieve_req(sizeof(struct ffa_memory_region),
+ sizeof(struct ffa_memory_region));
+
+ if (ffa_func_id(ret) != FFA_MEM_RETRIEVE_RESP) {
+ ERROR("%s: couldn't retrieve the memory page. Error: %d\n",
+ __func__, ffa_error_code(ret));
+ return false;
+ }
+
+ /*
+ * Following total_size and fragment_size are useful to keep track
+ * of the state of transaction. When the sum of all fragment_size of all
+ * fragments is equal to total_size, the memory transaction has been
+ * completed.
+ */
+ total_size = ret.arg1;
+ fragment_size = ret.arg2;
+ fragment_offset = fragment_size;
+ VERBOSE("total_size=%d, fragment_size=%d, fragment_offset=%d\n",
+ total_size, fragment_size, fragment_offset);
+
+ if (fragment_size > PAGE_SIZE) {
+ ERROR("Fragment should be smaller than RX buffer!\n");
+ return false;
+ }
+ if (total_size > out_size) {
+ ERROR("output buffer is not large enough to store all "
+ "fragments (total_size=%d, max_size=%d)\n",
+ total_size, out_size);
+ return false;
+ }
+
+ /*
+ * Copy the received message to the out buffer. This is necessary
+ * because `mb->recv` will be overwritten if sending a fragmented
+ * message.
+ */
+ memcpy(out, mb->recv, fragment_size);
+
+ if (region_out->receiver_count == 0) {
+		VERBOSE("copied region has no receivers\n");
+ return false;
+ }
+
+ if (region_out->receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
+ VERBOSE("SPMC memory sharing operations support max of %u "
+ "receivers!\n",
+ MAX_MEM_SHARE_RECIPIENTS);
+ return false;
+ }
+
+ while (fragment_offset < total_size) {
+ VERBOSE("Calling again. frag offset: %d; total: %d\n",
+ fragment_offset, total_size);
+ ret = ffa_rx_release();
+ if (ret.fid != FFA_SUCCESS_SMC32) {
+ ERROR("ffa_rx_release() failed: %d\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ ret = ffa_mem_frag_rx(handle, fragment_offset);
+ if (ret.fid != FFA_MEM_FRAG_TX) {
+ ERROR("ffa_mem_frag_rx() failed: %d\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ if (ffa_frag_handle(ret) != handle) {
+ ERROR("%s: fragment handle mismatch: expected %llu, "
+ "got "
+ "%llu\n",
+ __func__, handle, ffa_frag_handle(ret));
+ return false;
+ }
+
+ /* Sender MBZ at physical instance. */
+ if (ffa_frag_sender(ret) != 0) {
+ ERROR("%s: fragment sender mismatch: expected %d, got "
+ "%d\n",
+ __func__, 0, ffa_frag_sender(ret));
+ return false;
+ }
+
+ fragment_size = ret.arg2;
+ if (fragment_size == 0) {
+ ERROR("%s: fragment size must not be 0\n", __func__);
+ return false;
+ }
+
+ if (fragment_offset + fragment_size > out_size) {
+ ERROR("%s: fragment is too big to fit in out buffer "
+ "(%d > %d)\n",
+ __func__, fragment_offset + fragment_size,
+ out_size);
+ return false;
+ }
+
+ VERBOSE("copying fragment at offset %d with size %d\n",
+ fragment_offset, fragment_size);
+ memcpy((uint8_t *)out + fragment_offset, mb->recv,
+ fragment_size);
+
+ fragment_offset += fragment_size;
+ }
+
+ if (fragment_offset != total_size) {
+ ERROR("%s: fragment size mismatch: expected %d, got %d\n",
+ __func__, total_size, fragment_offset);
+ return false;
+ }
+
+ ret = ffa_rx_release();
+ if (ret.fid != FFA_SUCCESS_SMC32) {
+ ERROR("ffa_rx_release() failed: %d\n", ffa_error_code(ret));
+ return false;
+ }
+
+ VERBOSE("Memory Retrieved!\n");
+
+ return true;
+}
+
bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle,
ffa_id_t id)
{
@@ -294,21 +426,103 @@
return true;
}
+bool send_fragmented_memory_region(
+ void *send_buffer,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t remaining_constituent_count,
+ uint32_t sent_length, uint32_t total_length, bool allocator_is_spmc,
+ struct ffa_value ret)
+{
+
+ uint64_t handle;
+ uint64_t handle_mask;
+ uint64_t expected_handle_mask =
+ allocator_is_spmc ? FFA_MEMORY_HANDLE_ALLOCATOR_SPMC
+ : FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
+ ffa_memory_handle_t fragment_handle = FFA_MEMORY_HANDLE_INVALID;
+ uint32_t fragment_length;
+
+ /* Send the remaining fragments. */
+ while (remaining_constituent_count != 0) {
+ VERBOSE("%s: %d constituents left to send.\n", __func__,
+ remaining_constituent_count);
+ if (ret.fid != FFA_MEM_FRAG_RX) {
+			ERROR("ffa_mem_frag_tx() failed: %d\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ if (fragment_handle == FFA_MEMORY_HANDLE_INVALID) {
+ fragment_handle = ffa_frag_handle(ret);
+ } else if (ffa_frag_handle(ret) != fragment_handle) {
+ ERROR("%s: fragment handle mismatch: expected %llu, "
+ "got %llu\n",
+ __func__, fragment_handle, ffa_frag_handle(ret));
+ return false;
+ }
+
+ if (ret.arg3 != sent_length) {
+ ERROR("%s: fragment length mismatch: expected %u, got "
+ "%lu\n",
+ __func__, sent_length, ret.arg3);
+ return false;
+ }
+
+ remaining_constituent_count = ffa_memory_fragment_init(
+ send_buffer, PAGE_SIZE,
+ constituents + constituent_count -
+ remaining_constituent_count,
+ remaining_constituent_count, &fragment_length);
+
+ ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
+ sent_length += fragment_length;
+ }
+
+ if (sent_length != total_length) {
+ ERROR("%s: fragment length mismatch: expected %u, got %u\n",
+ __func__, total_length, sent_length);
+ return false;
+ }
+
+ if (ret.fid != FFA_SUCCESS_SMC32) {
+		ERROR("%s: ffa_mem_frag_tx() failed: %d\n", __func__,
+ ffa_error_code(ret));
+ return false;
+ }
+
+ handle = ffa_mem_success_handle(ret);
+ handle_mask = (handle >> FFA_MEMORY_HANDLE_ALLOCATOR_SHIFT) &
+ FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
+
+ if (handle_mask != expected_handle_mask) {
+ ERROR("%s: handle mask mismatch: expected %llu, got %llu\n",
+ __func__, expected_handle_mask, handle_mask);
+ return false;
+ }
+
+ if (fragment_handle != FFA_MEMORY_HANDLE_INVALID && handle != fragment_handle) {
+		ERROR("%s: fragment handle mismatch: expected %llu, got %llu\n",
+		      __func__, fragment_handle, handle);
+ return false;
+ }
+
+ return true;
+}
+
/**
* Helper to call memory send function whose func id is passed as a parameter.
- * Returns a valid handle in case of successful operation or
- * FFA_MEMORY_HANDLE_INVALID if something goes wrong. Populates *ret with a
- * resulting smc value to handle the error higher in the test chain.
- *
- * TODO: Do memory send with 'ffa_memory_region' taking multiple segments
*/
ffa_memory_handle_t memory_send(
- struct ffa_memory_region *memory_region, uint32_t mem_func,
- uint32_t fragment_length, uint32_t total_length, struct ffa_value *ret)
+ void *send_buffer, uint32_t mem_func,
+ const struct ffa_memory_region_constituent *constituents,
+ uint32_t constituent_count, uint32_t remaining_constituent_count,
+ uint32_t fragment_length, uint32_t total_length,
+ struct ffa_value *ret)
{
- if (fragment_length != total_length) {
- ERROR("For now, fragment_length and total_length need to be"
- " equal");
+ if (remaining_constituent_count == 0 && fragment_length != total_length) {
+ ERROR("%s: fragment_length and total_length need "
+ "to be equal (fragment_length = %d, total_length = %d)\n",
+ __func__, fragment_length, total_length);
return FFA_MEMORY_HANDLE_INVALID;
}
@@ -323,16 +537,20 @@
*ret = ffa_mem_donate(total_length, fragment_length);
break;
default:
- *ret = (struct ffa_value){0};
- ERROR("TFTF - Invalid func id %x!\n", mem_func);
+ ERROR("%s: Invalid func id %d!\n", __func__, mem_func);
return FFA_MEMORY_HANDLE_INVALID;
}
if (is_ffa_call_error(*ret)) {
- VERBOSE("Failed to send memory to: %x\n",
- memory_region->receivers[0]
- .receiver_permissions
- .receiver);
+		VERBOSE("%s: Failed to send memory: %d\n", __func__,
+			ffa_error_code(*ret));
+ return FFA_MEMORY_HANDLE_INVALID;
+ }
+
+ if (!send_fragmented_memory_region(
+ send_buffer, constituents, constituent_count,
+ remaining_constituent_count, fragment_length, total_length,
+ true, *ret)) {
return FFA_MEMORY_HANDLE_INVALID;
}
@@ -345,8 +563,8 @@
* doing it in this file for simplicity and for testing purposes.
*/
ffa_memory_handle_t memory_init_and_send(
- struct ffa_memory_region *memory_region, size_t memory_region_max_size,
- ffa_id_t sender, ffa_id_t receiver,
+ void *send_buffer, size_t memory_region_max_size, ffa_id_t sender,
+ struct ffa_memory_access receivers[], uint32_t receiver_count,
const struct ffa_memory_region_constituent *constituents,
uint32_t constituents_count, uint32_t mem_func, struct ffa_value *ret)
{
@@ -354,37 +572,20 @@
uint32_t total_length;
uint32_t fragment_length;
- enum ffa_data_access data_access = (mem_func == FFA_MEM_DONATE_SMC32) ?
- FFA_DATA_ACCESS_NOT_SPECIFIED :
- FFA_DATA_ACCESS_RW;
+ enum ffa_memory_type type =
+ (receiver_count == 1 && mem_func != FFA_MEM_SHARE_SMC32)
+ ? FFA_MEMORY_NOT_SPECIFIED_MEM
+ : FFA_MEMORY_NORMAL_MEM;
- /*
- * Initialize memory region structure for the respective memory send
- * operation. Note that memory type shall only be specified for memory
- * share, for memory lend and memory donate these shall remain
- * unspecified.
- */
remaining_constituent_count = ffa_memory_region_init(
- memory_region, memory_region_max_size, sender, receiver, constituents,
- constituents_count, 0, 0, data_access,
- FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
- mem_func == FFA_MEM_SHARE_SMC32
- ? FFA_MEMORY_NORMAL_MEM
- : FFA_MEMORY_NOT_SPECIFIED_MEM,
+ send_buffer, memory_region_max_size, sender, receivers,
+ receiver_count, constituents, constituents_count, 0, 0, type,
FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
&total_length, &fragment_length);
- /*
- * For simplicity of the test, and at least for the time being,
- * the following condition needs to be true.
- */
- if (remaining_constituent_count != 0U) {
- ERROR("Remaining constituent should be 0\n");
- return FFA_MEMORY_HANDLE_INVALID;
- }
-
- return memory_send(memory_region, mem_func, fragment_length,
- total_length, ret);
+ return memory_send(send_buffer, mem_func, constituents,
+ constituents_count, remaining_constituent_count,
+ fragment_length, total_length, ret);
}
static bool ffa_uuid_equal(const struct ffa_uuid uuid1,
@@ -610,3 +811,22 @@
{
return configure_trusted_wdog_interrupt(source, dest, false);
}
+
+/**
+ * Initializes receiver permissions in a memory transaction descriptor, using
+ * `mem_func` to determine the appropriate permissions.
+ */
+struct ffa_memory_access ffa_memory_access_init_permissions_from_mem_func(
+ ffa_id_t receiver_id, uint32_t mem_func)
+{
+
+ enum ffa_instruction_access instruction_access =
+ FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED;
+ enum ffa_data_access data_access =
+ (mem_func == FFA_MEM_DONATE_SMC32)
+ ? FFA_DATA_ACCESS_NOT_SPECIFIED
+ : FFA_DATA_ACCESS_RW;
+
+ return ffa_memory_access_init(receiver_id, data_access,
+ instruction_access, 0, NULL);
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c b/tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c
index 5cb6dd4..9e58f2c 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c
@@ -1,11 +1,12 @@
/*
- * Copyright (c) 2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <cactus_test_cmds.h>
+#include "ffa_helpers.h"
#include <debug.h>
#include <ffa_endpoints.h>
#include <ffa_svc.h>
@@ -51,18 +52,21 @@
struct ffa_value ret;
u_register_t retmm;
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(
+ RECEIVER, FFA_MEM_SHARE_SMC32);
+
if (get_armv9_2_feat_rme_support() == 0U) {
return TEST_RESULT_SKIPPED;
}
- CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
GET_TFTF_MAILBOX(mb);
- handle = memory_init_and_send((struct ffa_memory_region *)mb.send,
- PAGE_SIZE, SENDER, RECEIVER,
- constituents, constituents_count,
- FFA_MEM_SHARE_SMC32, &ret);
+ handle = memory_init_and_send(mb.send, PAGE_SIZE, SENDER, &receiver, 1,
+ constituents, constituents_count,
+ FFA_MEM_SHARE_SMC32, &ret);
if (handle == FFA_MEMORY_HANDLE_INVALID) {
return TEST_RESULT_FAIL;
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c b/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
index d42492b..027b021 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
@@ -1,10 +1,11 @@
/*
- * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <debug.h>
+#include "ffa_helpers.h"
#include <cactus_test_cmds.h>
#include <ffa_endpoints.h>
@@ -19,12 +20,21 @@
#define SENDER HYP_ID
#define RECEIVER SP_ID(1)
+/*
+ * A number of pages that is large enough that it must take two fragments to
+ * share.
+ */
+#define FRAGMENTED_SHARE_PAGE_COUNT \
+ (sizeof(struct ffa_memory_region) / \
+ sizeof(struct ffa_memory_region_constituent))
+
static const struct ffa_uuid expected_sp_uuids[] = {
{PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
};
/* Memory section to be used for memory share operations */
-static __aligned(PAGE_SIZE) uint8_t share_page[PAGE_SIZE];
+static __aligned(PAGE_SIZE) uint8_t
+ share_page[PAGE_SIZE * FRAGMENTED_SHARE_PAGE_COUNT];
static __aligned(PAGE_SIZE) uint8_t consecutive_donate_page[PAGE_SIZE];
static __aligned(PAGE_SIZE) uint8_t four_share_pages[PAGE_SIZE * 4];
@@ -55,16 +65,21 @@
const uint32_t constituents_count = sizeof(constituents) /
sizeof(struct ffa_memory_region_constituent);
+
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(borrower,
+ mem_func);
+
GET_TFTF_MAILBOX(mb);
handle = memory_init_and_send((struct ffa_memory_region *)mb.send,
- MAILBOX_SIZE, SENDER, borrower,
+ MAILBOX_SIZE, SENDER, &receiver, 1,
constituents, constituents_count,
mem_func, &ret);
if (handle != FFA_MEMORY_HANDLE_INVALID) {
- ERROR("Received a valid FF-A memory handle, and that isn't"
- " expected.\n");
+ ERROR("Received a valid FF-A memory handle, and that isn't "
+ "expected.\n");
return false;
}
@@ -90,7 +105,7 @@
(uintptr_t)0x0000880080001000,
};
- CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
for (unsigned i = 0; i < 3; i++) {
if (!test_memory_send_expect_denied(
@@ -133,10 +148,14 @@
/* Arbitrarily write 5 words after using memory. */
const uint32_t nr_words_to_write = 5;
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(borrower,
+ mem_func);
+
/***********************************************************************
* Check if SPMC has ffa_version and expected FFA endpoints are deployed.
**********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
GET_TFTF_MAILBOX(mb);
@@ -149,7 +168,7 @@
}
handle = memory_init_and_send((struct ffa_memory_region *)mb.send,
- MAILBOX_SIZE, SENDER, borrower,
+ MAILBOX_SIZE, SENDER, &receiver, 1,
constituents, constituents_count,
mem_func, &ret);
@@ -239,7 +258,7 @@
const uint32_t constituents_count = sizeof(constituents) /
sizeof(struct ffa_memory_region_constituent);
- CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
test_result_t ret = test_memory_send_sp(FFA_MEM_DONATE_SMC32, SP_ID(1),
constituents,
@@ -283,7 +302,7 @@
/***********************************************************************
* Check if SPMC's ffa_version and presence of expected FF-A endpoints.
**********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
ret = cactus_req_mem_send_send_cmd(HYP_ID, sender_sp, mem_func,
receiver_sp, non_secure);
@@ -315,7 +334,7 @@
/**********************************************************************
* Check if SPMC's ffa_version and presence of expected FF-A endpoints.
*********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
ret = cactus_req_mem_send_send_cmd(HYP_ID, sender_sp, mem_func,
receiver_vm, false);
@@ -403,15 +422,18 @@
/* Arbitrarily write 10 words after using shared memory. */
const uint32_t nr_words_to_write = 10U;
- CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(
+ RECEIVER, FFA_MEM_LEND_SMC32);
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
GET_TFTF_MAILBOX(mb);
remaining_constituent_count = ffa_memory_region_init(
(struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
- RECEIVER, constituents, constituents_count, 0,
- FFA_MEMORY_REGION_FLAG_CLEAR, FFA_DATA_ACCESS_RW,
- FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ &receiver, 1, constituents, constituents_count, 0,
+ FFA_MEMORY_REGION_FLAG_CLEAR,
FFA_MEMORY_NOT_SPECIFIED_MEM, 0, 0,
&total_length, &fragment_length);
@@ -420,8 +442,9 @@
return TEST_RESULT_FAIL;
}
- handle = memory_send(mb.send, FFA_MEM_LEND_SMC32, fragment_length,
- total_length, &ret);
+ handle = memory_send(mb.send, FFA_MEM_LEND_SMC32, constituents,
+ constituents_count, remaining_constituent_count,
+ fragment_length, total_length, &ret);
if (handle == FFA_MEMORY_HANDLE_INVALID) {
ERROR("Memory Share failed!\n");
@@ -459,3 +482,434 @@
return TEST_RESULT_SUCCESS;
}
+
+/**
+ * Print the header fields of `region` when LOG_LEVEL >= LOG_LEVEL_VERBOSE.
+ *
+ * Debugging aid for the hypervisor retrieve request tests below; it has no
+ * effect on the test verdict.
+ */
+static void print_memory_region(struct ffa_memory_region *region)
+{
+	VERBOSE("region.sender = %d\n", region->sender);
+	VERBOSE("region.attributes.shareability = %d\n",
+		region->attributes.shareability);
+	VERBOSE("region.attributes.cacheability = %d\n",
+		region->attributes.cacheability);
+	VERBOSE("region.attributes.type = %d\n", region->attributes.type);
+	VERBOSE("region.attributes.security = %d\n",
+		region->attributes.security);
+	VERBOSE("region.flags = %d\n", region->flags);
+	/* NOTE(review): handle/tag are printed with %lld; if these fields are
+	 * uint64_t, PRIu64 would be the portable specifier -- confirm. */
+	VERBOSE("region.handle = %lld\n", region->handle);
+	VERBOSE("region.tag = %lld\n", region->tag);
+	VERBOSE("region.memory_access_desc_size = %d\n",
+		region->memory_access_desc_size);
+	VERBOSE("region.receiver_count = %d\n", region->receiver_count);
+	VERBOSE("region.receivers_offset = %d\n", region->receivers_offset);
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ *
+ * Compares the memory region header returned by the SPMC (`region1`) against
+ * the locally built expectation (`region2`) field by field, and checks that
+ * the reserved words of the response are zero. Logs the first mismatching
+ * field and returns false; returns true on a full match.
+ */
+static bool verify_retrieve_response(const struct ffa_memory_region *region1,
+				     const struct ffa_memory_region *region2)
+{
+	if (region1->sender != region2->sender) {
+		ERROR("region1.sender=%d, expected %d\n", region1->sender,
+		      region2->sender);
+		return false;
+	}
+	if (region1->attributes.shareability != region2->attributes.shareability) {
+		ERROR("region1.attributes.shareability=%d, expected %d\n",
+		      region1->attributes.shareability,
+		      region2->attributes.shareability);
+		return false;
+	}
+	if (region1->attributes.cacheability != region2->attributes.cacheability) {
+		ERROR("region1.attributes.cacheability=%d, expected %d\n",
+		      region1->attributes.cacheability,
+		      region2->attributes.cacheability);
+		return false;
+	}
+	if (region1->attributes.type != region2->attributes.type) {
+		ERROR("region1.attributes.type=%d, expected %d\n",
+		      region1->attributes.type, region2->attributes.type);
+		return false;
+	}
+	if (region1->attributes.security != region2->attributes.security) {
+		ERROR("region1.attributes.security=%d, expected %d\n",
+		      region1->attributes.security, region2->attributes.security);
+		return false;
+	}
+	if (region1->flags != region2->flags) {
+		ERROR("region1->flags=%d, expected %d\n", region1->flags,
+		      region2->flags);
+		return false;
+	}
+	if (region1->handle != region2->handle) {
+		ERROR("region1.handle=%lld, expected %lld\n", region1->handle,
+		      region2->handle);
+		return false;
+	}
+	if (region1->tag != region2->tag) {
+		ERROR("region1.tag=%lld, expected %lld\n", region1->tag, region2->tag);
+		return false;
+	}
+	if (region1->memory_access_desc_size != region2->memory_access_desc_size) {
+		ERROR("region1.memory_access_desc_size=%d, expected %d\n",
+		      region1->memory_access_desc_size,
+		      region2->memory_access_desc_size);
+		return false;
+	}
+	if (region1->receiver_count != region2->receiver_count) {
+		ERROR("region1.receiver_count=%d, expected %d\n",
+		      region1->receiver_count, region2->receiver_count);
+		return false;
+	}
+	if (region1->receivers_offset != region2->receivers_offset) {
+		ERROR("region1.receivers_offset=%d, expected %d\n",
+		      region1->receivers_offset, region2->receivers_offset);
+		return false;
+	}
+	/* NOTE(review): assumes region->reserved has exactly 3 entries --
+	 * confirm against the struct ffa_memory_region definition. */
+	for (uint32_t i = 0; i < 3; i++) {
+		if (region1->reserved[i] != 0) {
+			ERROR("region.reserved[%d]=%d, expected 0\n", i,
+			      region1->reserved[i]);
+			return false;
+		}
+	}
+	return true;
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ *
+ * Checks a single constituent descriptor against the expected base `address`
+ * and `page_count`, and that its reserved field is zero. Logs and returns
+ * false on the first mismatch.
+ */
+static bool
+verify_constituent(struct ffa_memory_region_constituent *constituent,
+		   void *address, uint32_t page_count)
+{
+	if (constituent->address != address) {
+		ERROR("constituent.address=%p, expected %p\n",
+		      constituent->address, address);
+		return false;
+	}
+	if (constituent->page_count != page_count) {
+		ERROR("constituent.page_count=%d, expected %d\n",
+		      constituent->page_count, page_count);
+		return false;
+	}
+	if (constituent->reserved != 0) {
+		ERROR("constituent.reserved=%d, expected 0\n",
+		      constituent->reserved);
+		return false;
+	}
+	return true;
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ *
+ * `constituent` points at the first of `composite->constituent_count`
+ * constituent descriptors. The sender built each constituent as exactly one
+ * page at `share_page + j * PAGE_SIZE` (see how `sent_constituents` is
+ * populated in the retrieve request helper), so each returned constituent is
+ * verified against that layout.
+ */
+static bool verify_composite(struct ffa_composite_memory_region *composite,
+		struct ffa_memory_region_constituent *constituent,
+		uint32_t page_count, uint32_t constituent_count)
+{
+	if (composite->page_count != page_count) {
+		ERROR("composite.page_count=%d, expected %d\n",
+		      composite->page_count, page_count);
+		return false;
+	}
+	if (composite->constituent_count != constituent_count) {
+		ERROR("composite.constituent_count=%d, expected %d\n",
+		      composite->constituent_count, constituent_count);
+		return false;
+	}
+	if (composite->reserved_0 != 0) {
+		ERROR("composite.reserved_0=%llu, expected 0\n",
+		      composite->reserved_0);
+		return false;
+	}
+	/*
+	 * Fix: the previous loop ignored `j` and re-checked the first
+	 * constituent against `share_page` on every iteration, so mismatches
+	 * in later constituents went undetected. Index by `j` and advance the
+	 * expected address by one page per constituent.
+	 */
+	for (uint32_t j = 0; j < composite->constituent_count; j++) {
+		if (!verify_constituent(&constituent[j],
+					share_page + j * PAGE_SIZE, 1)) {
+			return false;
+		}
+	}
+	return true;
+}
+
+/**
+ * Used by hypervisor retrieve request test: check that two implementation
+ * defined values of a memory access descriptor match. Logs and returns false
+ * on mismatch.
+ */
+static bool verify_receivers_impdef(struct ffa_memory_access_impdef impdef1,
+				    struct ffa_memory_access_impdef impdef2)
+{
+	if (impdef1.val[0] != impdef2.val[0] ||
+	    impdef1.val[1] != impdef2.val[1]) {
+		/* Fix: error message previously misspelt "impdef" as "ipmdef". */
+		ERROR("impdef1.val[0]=%llu expected=%llu"
+		      " impdef1.val[1]=%llu expected=%llu\n",
+		      impdef1.val[0], impdef2.val[0],
+		      impdef1.val[1], impdef2.val[1]);
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * Check that the data and instruction access fields of two permission
+ * descriptors match. Logs the first mismatching field and returns false;
+ * returns true when both fields agree.
+ */
+static bool verify_permissions(
+	ffa_memory_access_permissions_t permissions1,
+	ffa_memory_access_permissions_t permissions2)
+{
+	const uint8_t data1 = permissions1.data_access;
+	const uint8_t data2 = permissions2.data_access;
+	const uint8_t instr1 = permissions1.instruction_access;
+	const uint8_t instr2 = permissions2.instruction_access;
+
+	if (data1 != data2) {
+		ERROR("permissions1.data_access=%u expected=%u\n",
+		      data1, data2);
+		return false;
+	}
+
+	if (instr1 != instr2) {
+		ERROR("permissions1.instruction_access=%u expected=%u\n",
+		      instr1, instr2);
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ *
+ * Compares each returned receiver (endpoint id, flags, permissions,
+ * composite offset and implementation defined value) against the expected
+ * one. Logs the first mismatch and returns false.
+ */
+static bool verify_receivers(struct ffa_memory_access *receivers1,
+			     struct ffa_memory_access *receivers2,
+			     uint32_t receivers_count)
+{
+	for (uint32_t i = 0; i < receivers_count; i++) {
+		if (receivers1[i].receiver_permissions.receiver !=
+		    receivers2[i].receiver_permissions.receiver) {
+			ERROR("receivers1[%u].receiver_permissions.receiver=%x"
+			      " expected=%x\n", i,
+			      receivers1[i].receiver_permissions.receiver,
+			      receivers2[i].receiver_permissions.receiver);
+			return false;
+		}
+
+		if (receivers1[i].receiver_permissions.flags !=
+		    receivers2[i].receiver_permissions.flags) {
+			ERROR("receivers1[%u].receiver_permissions.flags=%u"
+			      " expected=%u\n", i,
+			      receivers1[i].receiver_permissions.flags,
+			      receivers2[i].receiver_permissions.flags);
+			return false;
+		}
+
+		if (!verify_permissions(
+			receivers1[i].receiver_permissions.permissions,
+			receivers2[i].receiver_permissions.permissions)) {
+			return false;
+		}
+
+		if (receivers1[i].composite_memory_region_offset !=
+		    receivers2[i].composite_memory_region_offset) {
+			ERROR("receivers1[%u].composite_memory_region_offset=%u"
+			      " expected %u\n",
+			      i, receivers1[i].composite_memory_region_offset,
+			      receivers2[i].composite_memory_region_offset);
+			return false;
+		}
+
+		/*
+		 * Fix: previously compared receivers1[i].impdef against
+		 * itself, making this check vacuous. Compare against the
+		 * expected receivers2[i].impdef instead.
+		 */
+		if (!verify_receivers_impdef(receivers1[i].impdef,
+					     receivers2[i].impdef)) {
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * Helper for performing a hypervisor retrieve request test.
+ *
+ * Shares/lends/donates memory to one or two SPs (per `multiple_receivers`),
+ * optionally with enough one-page constituents to force fragmentation
+ * (`fragmented`), then issues a hypervisor retrieve request for the handle
+ * and checks the returned transaction descriptor against what was sent.
+ * The handle is reclaimed before returning so the SPMC releases its state.
+ */
+static test_result_t hypervisor_retrieve_request_test_helper(
+	uint32_t mem_func, bool multiple_receivers, bool fragmented)
+{
+	static struct ffa_memory_region_constituent
+		sent_constituents[FRAGMENTED_SHARE_PAGE_COUNT];
+	/* Buffer receiving the SPMC's retrieve response (two pages). */
+	__aligned(PAGE_SIZE) static uint8_t page[PAGE_SIZE * 2] = {0};
+	struct ffa_memory_region *hypervisor_retrieve_response =
+		(struct ffa_memory_region *)page;
+	struct ffa_memory_region expected_response;
+	struct mailbox_buffers mb;
+	ffa_memory_handle_t handle;
+	struct ffa_value ret;
+	struct ffa_composite_memory_region *composite;
+	struct ffa_memory_access *retrvd_receivers;
+	uint32_t expected_flags = 0;
+
+	/*
+	 * Attributes the SPMC is expected to report back: not-specified memory
+	 * type for a single-borrower lend/donate, normal memory otherwise.
+	 */
+	ffa_memory_attributes_t expected_attrs = {
+		.cacheability = FFA_MEMORY_CACHE_WRITE_BACK,
+		.shareability = FFA_MEMORY_INNER_SHAREABLE,
+		.security = FFA_MEMORY_SECURITY_NON_SECURE,
+		.type = (!multiple_receivers && mem_func != FFA_MEM_SHARE_SMC32)
+			? FFA_MEMORY_NOT_SPECIFIED_MEM
+			: FFA_MEMORY_NORMAL_MEM,
+	};
+
+	struct ffa_memory_access receivers[2] = {
+		ffa_memory_access_init_permissions_from_mem_func(SP_ID(1),
+								 mem_func),
+		ffa_memory_access_init_permissions_from_mem_func(SP_ID(2),
+								 mem_func),
+	};
+
+	/*
+	 * Only pass 1 receiver to `memory_init_and_send` if we are not testing
+	 * the multiple-receivers functionality of the hypervisor retrieve
+	 * request.
+	 */
+	uint32_t receiver_count =
+		multiple_receivers ? ARRAY_SIZE(receivers) : 1;
+
+	uint32_t sent_constituents_count =
+		fragmented ? ARRAY_SIZE(sent_constituents) : 1;
+
+	/* Prepare the composite offset for the comparison. */
+	for (uint32_t i = 0; i < receiver_count; i++) {
+		receivers[i].composite_memory_region_offset =
+			sizeof(struct ffa_memory_region) +
+			receiver_count *
+				sizeof(struct ffa_memory_access);
+	}
+
+	/*
+	 * Add a page per constituent, so that we exhaust the size of a single
+	 * fragment (for testing). In a real world scenario, the whole region
+	 * could be described in a single constituent.
+	 */
+	for (uint32_t i = 0; i < sent_constituents_count; i++) {
+		sent_constituents[i].address = share_page + i * PAGE_SIZE;
+		sent_constituents[i].page_count = 1;
+		sent_constituents[i].reserved = 0;
+	}
+
+	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+	GET_TFTF_MAILBOX(mb);
+
+	/* Map mem_func to the transaction type flag expected in the reply. */
+	switch (mem_func) {
+	case FFA_MEM_SHARE_SMC32:
+		expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
+		break;
+	case FFA_MEM_LEND_SMC32:
+		expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
+		break;
+	case FFA_MEM_DONATE_SMC32:
+		expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
+		break;
+	default:
+		ERROR("Invalid mem_func: %d\n", mem_func);
+		panic();
+	}
+
+	handle = memory_init_and_send(mb.send, MAILBOX_SIZE, SENDER, receivers,
+				      receiver_count, sent_constituents,
+				      sent_constituents_count, mem_func, &ret);
+	if (handle == FFA_MEMORY_HANDLE_INVALID) {
+		ERROR("Memory share failed: %d\n", ffa_error_code(ret));
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Send Hypervisor Retrieve request according to section 17.4.3 of FFA
+	 * v1.2-REL0 specification.
+	 */
+	if (!hypervisor_retrieve_request(&mb, handle, page, sizeof(page))) {
+		return TEST_RESULT_FAIL;
+	}
+
+	print_memory_region(hypervisor_retrieve_response);
+
+	/*
+	 * Verify the received `FFA_MEM_RETRIEVE_RESP` aligns with
+	 * transaction description sent above.
+	 */
+	expected_response = (struct ffa_memory_region) {
+		.sender = SENDER,
+		.attributes = expected_attrs,
+		.flags = expected_flags,
+		.handle = handle,
+		.tag = 0,
+		.memory_access_desc_size = sizeof(struct ffa_memory_access),
+		.receiver_count = receiver_count,
+		.receivers_offset =
+			offsetof(struct ffa_memory_region, receivers),
+	};
+
+	if (!verify_retrieve_response(hypervisor_retrieve_response,
+				      &expected_response)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Check the receivers reported by the SPMC match what was sent. */
+	retrvd_receivers =
+		ffa_memory_region_get_receiver(hypervisor_retrieve_response, 0);
+
+	if (!verify_receivers(retrvd_receivers,
+			      receivers, receiver_count)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	composite = ffa_memory_region_get_composite(
+		hypervisor_retrieve_response, 0);
+
+	if (!verify_composite(composite, composite->constituents,
+			      sent_constituents_count, sent_constituents_count)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Reclaim for the SPMC to deallocate any data related to the handle.
+	 */
+	ret = ffa_mem_reclaim(handle, 0);
+	if (is_ffa_call_error(ret)) {
+		ERROR("Memory reclaim failed: %d\n", ffa_error_code(ret));
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test entry points: exercise the hypervisor retrieve request flow for each
+ * memory sharing primitive (share/lend/donate), optionally with multiple
+ * borrowers or with a region large enough to require fragmentation.
+ */
+test_result_t test_hypervisor_share_retrieve(void)
+{
+	return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC32, false, false);
+}
+
+test_result_t test_hypervisor_lend_retrieve(void)
+{
+	return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC32, false, false);
+}
+
+test_result_t test_hypervisor_donate_retrieve(void)
+{
+	return hypervisor_retrieve_request_test_helper(FFA_MEM_DONATE_SMC32, false, false);
+}
+
+test_result_t test_hypervisor_share_retrieve_multiple_receivers(void)
+{
+	return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC32, true, false);
+}
+
+test_result_t test_hypervisor_lend_retrieve_multiple_receivers(void)
+{
+	return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC32, true, false);
+}
+
+test_result_t test_hypervisor_share_retrieve_fragmented(void)
+{
+	return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC32, false, true);
+}
+
+test_result_t test_hypervisor_lend_retrieve_fragmented(void)
+{
+	return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC32, false, true);
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_spm_cpu_features.c b/tftf/tests/runtime_services/secure_service/test_spm_simd.c
similarity index 100%
rename from tftf/tests/runtime_services/secure_service/test_spm_cpu_features.c
rename to tftf/tests/runtime_services/secure_service/test_spm_simd.c
diff --git a/tftf/tests/runtime_services/secure_service/test_spm_smmu.c b/tftf/tests/runtime_services/secure_service/test_spm_smmu.c
index 238feae..ae2068a 100644
--- a/tftf/tests/runtime_services/secure_service/test_spm_smmu.c
+++ b/tftf/tests/runtime_services/secure_service/test_spm_smmu.c
@@ -7,33 +7,142 @@
#include <cactus_test_cmds.h>
#include <debug.h>
#include <ffa_endpoints.h>
+#include <runtime_services/host_realm_managment/host_realm_rmi.h>
#include <smccc.h>
#include <spm_test_helpers.h>
#include <test_helpers.h>
+#if PLAT_fvp || PLAT_tc
+#include <sp_platform_def.h>
static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
+#endif
+
+#define TEST_DMA_ENGINE_MEMCPY (2U)
+#define TEST_DMA_ENGINE_RAND48 (3U)
+
+#define TEST_DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_S (0xffU)
+#define TEST_DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_NS (0x2ffU)
/**************************************************************************
- * Send a command to SP1 initiate DMA service with the help of a peripheral
- * device upstream of an SMMUv3 IP
+ * test_smmu_spm
+ *
+ * Send commands to SP1 to initiate a DMA service with the help of a peripheral
+ * device upstream of an SMMUv3 IP.
+ * The scenario involves randomizing a secure buffer (first DMA operation),
+ * copying this buffer to another location (second DMA operation),
+ * and checking (by CPU) that both buffer contents match.
**************************************************************************/
 test_result_t test_smmu_spm(void)
 {
+#if PLAT_fvp || PLAT_tc
 	struct ffa_value ret;
 
 	/**********************************************************************
 	 * Check SPMC has ffa_version and expected FFA endpoints are deployed.
 	 **********************************************************************/
-	CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
 
-	VERBOSE("Sending command to SP %x for initiating DMA transfer\n",
+	VERBOSE("Sending command to SP %x for initiating DMA transfer.\n",
 		SP_ID(1));
 
-	ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1));
+	/*
+	 * Randomize first half of a secure buffer from the secure world
+	 * through the SMMU test engine DMA.
+	 * Destination memory attributes are secure rawaWB.
+	 */
+	/* NOTE(review): attributes appear packed as (dest << 16) | source,
+	 * based on the memcpy call below -- confirm against the test engine
+	 * DMA command encoding. */
+	ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+				  TEST_DMA_ENGINE_RAND48,
+				  PLAT_CACTUS_MEMCPY_BASE,
+				  PLAT_CACTUS_MEMCPY_RANGE / 2,
+				  TEST_DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_S << 16);
+
+	/* Expect the SMMU DMA operation to pass. */
+	if (cactus_get_response(ret) != CACTUS_SUCCESS) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Copy first half to second half of the buffer and
+	 * check both match.
+	 * Source and destination memory attributes are secure rawaWB.
+	 */
+	ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+				  TEST_DMA_ENGINE_MEMCPY,
+				  PLAT_CACTUS_MEMCPY_BASE,
+				  PLAT_CACTUS_MEMCPY_RANGE,
+				  (TEST_DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_S << 16) |
+				  TEST_DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_S);
+
+	/* Expect the SMMU DMA operation to pass. */
 	if (cactus_get_response(ret) != CACTUS_SUCCESS) {
 		return TEST_RESULT_FAIL;
 	}
 
 	return TEST_RESULT_SUCCESS;
+#else
+	return TEST_RESULT_SKIPPED;
+#endif
 }
+/**************************************************************************
+ * test_smmu_spm_invalid_access
+ *
+ * The scenario changes a NS buffer PAS into Realm PAS. It then queries a SP
+ * to initiate a secure DMA operation on this buffer through the SMMU.
+ * The operation is expected to fail as a secure DMA transaction to a Realm
+ * region fails SMMU GPC checks.
+ **************************************************************************/
+test_result_t test_smmu_spm_invalid_access(void)
+{
+#if PLAT_fvp || PLAT_tc
+	struct ffa_value ret;
+	u_register_t retmm;
+
+	/* Skip this test if RME is not implemented. */
+	if (get_armv9_2_feat_rme_support() == 0U) {
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/**********************************************************************
+	 * Check SPMC has ffa_version and expected FFA endpoints are deployed.
+	 **********************************************************************/
+	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+	/* Update the NS buffer to Realm PAS. */
+	retmm = host_rmi_granule_delegate((u_register_t)PLAT_CACTUS_NS_MEMCPY_BASE);
+	if (retmm != 0UL) {
+		ERROR("Granule delegate failed!\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	VERBOSE("Sending command to SP %x for initiating DMA transfer.\n",
+		SP_ID(1));
+
+	/*
+	 * Attempt randomizing the buffer (now turned into Realm PAS)
+	 * from the secure world through the SMMU test engine DMA.
+	 * Destination memory attributes are non-secure rawaWB.
+	 */
+	ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+				  TEST_DMA_ENGINE_RAND48,
+				  PLAT_CACTUS_NS_MEMCPY_BASE,
+				  PLAT_CACTUS_MEMCPY_RANGE,
+				  TEST_DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_NS << 16);
+
+	/*
+	 * Update the buffer back to NS PAS. Done before checking the DMA
+	 * result so the granule is restored even when the test fails below.
+	 */
+	retmm = host_rmi_granule_undelegate((u_register_t)PLAT_CACTUS_NS_MEMCPY_BASE);
+	if (retmm != 0UL) {
+		ERROR("Granule undelegate failed!\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Expect the SMMU DMA operation to have failed. */
+	if (cactus_get_response(ret) != CACTUS_ERROR) {
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+#else
+	return TEST_RESULT_SKIPPED;
+#endif
+}
diff --git a/tftf/tests/tests-cpu-extensions.mk b/tftf/tests/tests-cpu-extensions.mk
index 0b1839a..b0af1a3 100644
--- a/tftf/tests/tests-cpu-extensions.mk
+++ b/tftf/tests/tests-cpu-extensions.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2024, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -11,6 +11,7 @@
extensions/ecv/test_ecv.c \
extensions/fgt/test_fgt.c \
extensions/pmuv3/test_pmuv3.c \
+ extensions/mpam/test_mpam.c \
extensions/mte/test_mte.c \
extensions/pauth/test_pauth.c \
extensions/sme/test_sme.c \
diff --git a/tftf/tests/tests-cpu-extensions.xml b/tftf/tests/tests-cpu-extensions.xml
index 3935911..3b93344 100644
--- a/tftf/tests/tests-cpu-extensions.xml
+++ b/tftf/tests/tests-cpu-extensions.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+ Copyright (c) 2018-2024, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
@@ -16,6 +16,7 @@
<testcase name="Use Pointer Authentication Instructions" function="test_pauth_instructions" />
<testcase name="Check for Pointer Authentication key leakage from EL3" function="test_pauth_leakage" />
<testcase name="Check for Pointer Authentication key leakage from TSP" function="test_pauth_leakage_tsp" />
+ <testcase name="Access MPAM registers" function="test_mpam_reg_access" />
<testcase name="Use MTE Instructions" function="test_mte_instructions" />
<testcase name="Check for MTE register leakage" function="test_mte_leakage" />
<testcase name="Use FGT Registers" function="test_fgt_enabled" />
diff --git a/tftf/tests/tests-firmware-handoff.xml b/tftf/tests/tests-firmware-handoff.xml
index 2761626..4b4b2a4 100644
--- a/tftf/tests/tests-firmware-handoff.xml
+++ b/tftf/tests/tests-firmware-handoff.xml
@@ -7,7 +7,7 @@
-->
<testsuites>
- <testsuite name="Firmware Handoff" description="Validate transfer list managed by firmware hanoff framework">
+ <testsuite name="Firmware Handoff" description="Validate transfer list managed by firmware handoff framework">
<testcase name="Validate transfer list header" function="test_handoff_header" />
<testcase name="Validate HW_CONFIG in transfer list" function="test_handoff_dtb_payload" />
</testsuite>
diff --git a/tftf/tests/tests-realm-payload.xml b/tftf/tests/tests-realm-payload.xml
index dbd9cd5..68d68dd 100644
--- a/tftf/tests/tests-realm-payload.xml
+++ b/tftf/tests/tests-realm-payload.xml
@@ -12,6 +12,10 @@
function="host_test_realm_create_enter" />
<testcase name="Multiple Realm EL1 creation and execution test"
function="host_test_multiple_realm_create_enter" />
+ <testcase name="Realm payload multi rec multiple cpu"
+ function="host_realm_multi_rec_multiple_cpu" />
+ <testcase name="Realm payload multi rec validations"
+ function="host_realm_multi_rec_multiple_cpu2" />
<testcase name="Realm payload multi rec single cpu"
function="host_realm_multi_rec_single_cpu" />
<testcase name="Realm payload multi rec psci denied"
diff --git a/tftf/tests/tests-spm.mk b/tftf/tests/tests-spm.mk
index 174e11d..97b3a49 100644
--- a/tftf/tests/tests-spm.mk
+++ b/tftf/tests/tests-spm.mk
@@ -27,7 +27,7 @@
ifeq (${ARCH},aarch64)
TESTS_SOURCES += \
$(addprefix tftf/tests/runtime_services/secure_service/, \
- test_spm_cpu_features.c \
+ test_spm_simd.c \
)
TESTS_SOURCES += \
diff --git a/tftf/tests/tests-spm.xml b/tftf/tests/tests-spm.xml
index e47039f..09e0fd7 100644
--- a/tftf/tests/tests-spm.xml
+++ b/tftf/tests/tests-spm.xml
@@ -88,6 +88,20 @@
<testsuite name="FF-A Memory Sharing"
description="Test FF-A Memory Sharing ABIs" >
+ <testcase name="Hypervisor share + memory retrieve request"
+ function="test_hypervisor_share_retrieve" />
+ <testcase name="Hypervisor lend + memory retrieve request"
+ function="test_hypervisor_lend_retrieve" />
+ <testcase name="Hypervisor donate + memory retrieve request"
+ function="test_hypervisor_donate_retrieve" />
+ <testcase name="Hypervisor share + memory retrieve request (multiple receivers)"
+ function="test_hypervisor_share_retrieve_multiple_receivers" />
+ <testcase name="Hypervisor lend + memory retrieve request (multiple receivers)"
+ function="test_hypervisor_lend_retrieve_multiple_receivers" />
+ <testcase name="Hypervisor share + memory retrieve request (fragmented)"
+ function="test_hypervisor_share_retrieve_fragmented" />
+ <testcase name="Hypervisor lend + memory retrieve request (fragmented)"
+ function="test_hypervisor_lend_retrieve_fragmented" />
<testcase name="Lend Memory to Secure World"
function="test_mem_lend_sp" />
<testcase name="Lend memory, clear flag set"
@@ -156,6 +170,8 @@
description="Initiate stage2 translation for streams from upstream peripherals" >
<testcase name="Check DMA command by SMMUv3TestEngine completes"
function="test_smmu_spm" />
+ <testcase name="Check secure peripheral access to a realm region is aborted"
+ function="test_smmu_spm_invalid_access" />
</testsuite>
<testsuite name="FF-A Notifications"