Merge "fix(realm): fix RMI commands arguments descriptions"
diff --git a/Makefile b/Makefile
index 3aa67db..8f6e14c 100644
--- a/Makefile
+++ b/Makefile
@@ -196,6 +196,7 @@
 $(eval $(call add_define,TFTF_DEFINES,USE_NVM))
 $(eval $(call add_define,TFTF_DEFINES,ENABLE_REALM_PAYLOAD_TESTS))
 $(eval $(call add_define,TFTF_DEFINES,TRANSFER_LIST))
+$(eval $(call add_define,TFTF_DEFINES,PLAT_AMU_GROUP1_COUNTERS_MASK))
 
 ################################################################################
 
diff --git a/include/common/test_helpers.h b/include/common/test_helpers.h
index 8cddc72..3caca39 100644
--- a/include/common/test_helpers.h
+++ b/include/common/test_helpers.h
@@ -115,6 +115,25 @@
 		}								\
 	} while (0)
 
+#define SKIP_TEST_IF_DEBUGV8P9_NOT_SUPPORTED()					\
+	do {									\
+		if (arch_get_debug_version() != 				\
+				ID_AA64DFR0_V8_9_DEBUG_ARCH_SUPPORTED) {	\
+			tftf_testcase_printf(					\
+				"Debugv8p9 not supported\n");			\
+			return TEST_RESULT_SKIPPED;				\
+		}								\
+	} while (0)
+
+#define SKIP_TEST_IF_FGT2_NOT_SUPPORTED()					\
+	do {									\
+		if (!is_armv8_9_fgt2_present()) {				\
+			tftf_testcase_printf(					\
+				"Fine Grained Traps 2 not supported\n");	\
+			return TEST_RESULT_SKIPPED;				\
+		}								\
+	} while (0)
+
 #define SKIP_TEST_IF_SVE_NOT_SUPPORTED()					\
 	do {									\
 		if (!is_armv8_2_sve_present()) {				\
@@ -327,6 +346,15 @@
 		}								\
 	} while (false)
 
+#define SKIP_TEST_IF_LS64_NOT_SUPPORTED()					\
+	do {									\
+		if (get_feat_ls64_support() ==					\
+			ID_AA64ISAR1_LS64_NOT_SUPPORTED) {			\
+			tftf_testcase_printf("ARMv8.7-ls64 not supported");	\
+			return TEST_RESULT_SKIPPED;				\
+		}								\
+	} while (false)
+
 /* Helper macro to verify if system suspend API is supported */
 #define is_psci_sys_susp_supported()	\
 		(tftf_get_psci_feature_info(SMC_PSCI_SYSTEM_SUSPEND)		\
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index 6a0d286..c45358e 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -132,46 +132,52 @@
 #define ID_AA64PFR0_EL1_SHIFT			U(4)
 #define ID_AA64PFR0_EL2_SHIFT			U(8)
 #define ID_AA64PFR0_EL3_SHIFT			U(12)
-#define ID_AA64PFR0_AMU_SHIFT			U(44)
-#define ID_AA64PFR0_AMU_LENGTH			U(4)
-#define ID_AA64PFR0_AMU_MASK			ULL(0xf)
-#define ID_AA64PFR0_AMU_NOT_SUPPORTED		U(0x0)
-#define ID_AA64PFR0_AMU_V1			U(0x1)
-#define ID_AA64PFR0_AMU_V1P1			U(0x2)
 #define ID_AA64PFR0_ELX_MASK			ULL(0xf)
-#define ID_AA64PFR0_SVE_SHIFT			U(32)
-#define ID_AA64PFR0_SVE_WIDTH			U(4)
-#define ID_AA64PFR0_SVE_MASK			ULL(0xf)
-#define ID_AA64PFR0_SVE_LENGTH			U(4)
-#define ID_AA64PFR0_MPAM_SHIFT			U(40)
-#define ID_AA64PFR0_MPAM_MASK			ULL(0xf)
-#define ID_AA64PFR0_DIT_SHIFT			U(48)
-#define ID_AA64PFR0_DIT_MASK			ULL(0xf)
-#define ID_AA64PFR0_DIT_LENGTH			U(4)
-#define ID_AA64PFR0_DIT_SUPPORTED		U(1)
-#define ID_AA64PFR0_CSV2_SHIFT			U(56)
-#define ID_AA64PFR0_CSV2_MASK			ULL(0xf)
-#define ID_AA64PFR0_CSV2_WIDTH			U(4)
-#define ID_AA64PFR0_CSV2_NOT_SUPPORTED		ULL(0x0)
-#define ID_AA64PFR0_CSV2_SUPPORTED		ULL(0x1)
-#define ID_AA64PFR0_CSV2_2_SUPPORTED		ULL(0x2)
-#define ID_AA64PFR0_FEAT_RME_SHIFT		U(52)
-#define ID_AA64PFR0_FEAT_RME_MASK		ULL(0xf)
-#define ID_AA64PFR0_FEAT_RME_LENGTH		U(4)
-#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED	U(0)
-#define ID_AA64PFR0_FEAT_RME_V1			U(1)
-#define ID_AA64PFR0_RAS_MASK			ULL(0xf)
-#define ID_AA64PFR0_RAS_SHIFT			U(28)
-#define ID_AA64PFR0_RAS_WIDTH			U(4)
-#define ID_AA64PFR0_RAS_NOT_SUPPORTED		ULL(0x0)
-#define ID_AA64PFR0_RAS_SUPPORTED		ULL(0x1)
-#define ID_AA64PFR0_RASV1P1_SUPPORTED		ULL(0x2)
+#define ID_AA64PFR0_FP_SHIFT			U(16)
+#define ID_AA64PFR0_FP_WIDTH			U(4)
+#define ID_AA64PFR0_FP_MASK			U(0xf)
+#define ID_AA64PFR0_ADVSIMD_SHIFT		U(20)
+#define ID_AA64PFR0_ADVSIMD_WIDTH		U(4)
+#define ID_AA64PFR0_ADVSIMD_MASK		U(0xf)
 #define ID_AA64PFR0_GIC_SHIFT			U(24)
 #define ID_AA64PFR0_GIC_WIDTH			U(4)
 #define ID_AA64PFR0_GIC_MASK			ULL(0xf)
 #define ID_AA64PFR0_GIC_NOT_SUPPORTED		ULL(0x0)
 #define ID_AA64PFR0_GICV3_GICV4_SUPPORTED	ULL(0x1)
 #define ID_AA64PFR0_GICV4_1_SUPPORTED		ULL(0x2)
+#define ID_AA64PFR0_RAS_MASK			ULL(0xf)
+#define ID_AA64PFR0_RAS_SHIFT			U(28)
+#define ID_AA64PFR0_RAS_WIDTH			U(4)
+#define ID_AA64PFR0_RAS_NOT_SUPPORTED		ULL(0x0)
+#define ID_AA64PFR0_RAS_SUPPORTED		ULL(0x1)
+#define ID_AA64PFR0_RASV1P1_SUPPORTED		ULL(0x2)
+#define ID_AA64PFR0_SVE_SHIFT			U(32)
+#define ID_AA64PFR0_SVE_WIDTH			U(4)
+#define ID_AA64PFR0_SVE_MASK			ULL(0xf)
+#define ID_AA64PFR0_SVE_LENGTH			U(4)
+#define ID_AA64PFR0_MPAM_SHIFT			U(40)
+#define ID_AA64PFR0_MPAM_MASK			ULL(0xf)
+#define ID_AA64PFR0_AMU_SHIFT			U(44)
+#define ID_AA64PFR0_AMU_LENGTH			U(4)
+#define ID_AA64PFR0_AMU_MASK			ULL(0xf)
+#define ID_AA64PFR0_AMU_NOT_SUPPORTED		U(0x0)
+#define ID_AA64PFR0_AMU_V1			U(0x1)
+#define ID_AA64PFR0_AMU_V1P1			U(0x2)
+#define ID_AA64PFR0_DIT_SHIFT			U(48)
+#define ID_AA64PFR0_DIT_MASK			ULL(0xf)
+#define ID_AA64PFR0_DIT_LENGTH			U(4)
+#define ID_AA64PFR0_DIT_SUPPORTED		U(1)
+#define ID_AA64PFR0_FEAT_RME_SHIFT		U(52)
+#define ID_AA64PFR0_FEAT_RME_MASK		ULL(0xf)
+#define ID_AA64PFR0_FEAT_RME_LENGTH		U(4)
+#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED	U(0)
+#define ID_AA64PFR0_FEAT_RME_V1			U(1)
+#define ID_AA64PFR0_CSV2_SHIFT			U(56)
+#define ID_AA64PFR0_CSV2_MASK			ULL(0xf)
+#define ID_AA64PFR0_CSV2_WIDTH			U(4)
+#define ID_AA64PFR0_CSV2_NOT_SUPPORTED		ULL(0x0)
+#define ID_AA64PFR0_CSV2_SUPPORTED		ULL(0x1)
+#define ID_AA64PFR0_CSV2_2_SUPPORTED		ULL(0x2)
 
 /* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
 #define ID_AA64DFR0_PMS_SHIFT		U(32)
@@ -194,6 +200,7 @@
 #define ID_AA64DFR0_V8_DEBUG_ARCH_VHE_SUPPORTED	U(7)
 #define ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED	U(8)
 #define ID_AA64DFR0_V8_4_DEBUG_ARCH_SUPPORTED	U(9)
+#define ID_AA64DFR0_V8_9_DEBUG_ARCH_SUPPORTED   U(0xb)
 
 /* ID_AA64DFR0_EL1.HPMN0 definitions */
 #define ID_AA64DFR0_HPMN0_SHIFT			U(60)
@@ -302,6 +309,7 @@
 #define ID_AA64MMFR0_EL1_FGT_MASK		ULL(0xf)
 #define ID_AA64MMFR0_EL1_FGT_NOT_SUPPORTED	ULL(0x0)
 #define ID_AA64MMFR0_EL1_FGT_SUPPORTED		ULL(0x1)
+#define ID_AA64MMFR0_EL1_FGT2_SUPPORTED		ULL(0x2)
 
 #define ID_AA64MMFR0_EL1_TGRAN4_SHIFT		U(28)
 #define ID_AA64MMFR0_EL1_TGRAN4_WIDTH		U(4)
@@ -1285,10 +1293,24 @@
 #define HDFGWTR_EL2		S3_4_C3_C1_5
 
 /*******************************************************************************
+ * Armv8.9 - Fine Grained Virtualization Traps 2 Registers
+ ******************************************************************************/
+#define HFGRTR2_EL2            S3_4_C3_C1_2
+#define HFGWTR2_EL2            S3_4_C3_C1_3
+#define HFGITR2_EL2            S3_4_C3_C1_7
+#define HDFGRTR2_EL2           S3_4_C3_C1_0
+#define HDFGWTR2_EL2           S3_4_C3_C1_1
+
+/*******************************************************************************
  * Armv8.6 - Enhanced Counter Virtualization Registers
  ******************************************************************************/
 #define CNTPOFF_EL2  S3_4_C14_C0_6
 
+/******************************************************************************
+ * Armv8.9 - Breakpoint and Watchpoint Selection Register
+ ******************************************************************************/
+#define MDSELR_EL1		S2_0_C0_C4_2
+
 /*******************************************************************************
  * Armv9.0 - Trace Buffer Extension System Registers
  ******************************************************************************/
diff --git a/include/lib/aarch64/arch_features.h b/include/lib/aarch64/arch_features.h
index b6d0ce7..a2ed5be 100644
--- a/include/lib/aarch64/arch_features.h
+++ b/include/lib/aarch64/arch_features.h
@@ -42,6 +42,20 @@
 		ID_AA64PFR0_SVE_MASK) == 1U;
 }
 
+static inline bool is_feat_advsimd_present(void)
+{
+	u_register_t id_aa64pfr0_advsimd =
+		EXTRACT(ID_AA64PFR0_ADVSIMD, read_id_aa64pfr0_el1());
+	return (id_aa64pfr0_advsimd == 0 || id_aa64pfr0_advsimd == 1);
+}
+
+static inline bool is_feat_fp_present(void)
+{
+	u_register_t id_aa64pfr0_fp =
+		EXTRACT(ID_AA64PFR0_FP, read_id_aa64pfr0_el1());
+	return (id_aa64pfr0_fp == 0 || id_aa64pfr0_fp == 1);
+}
+
 static inline bool is_armv8_2_ttcnp_present(void)
 {
 	return ((read_id_aa64mmfr2_el1() >> ID_AA64MMFR2_EL1_CNP_SHIFT) &
@@ -126,8 +140,14 @@
 
 static inline bool is_armv8_6_fgt_present(void)
 {
+	return (((read_id_aa64mmfr0_el1() >> ID_AA64MMFR0_EL1_FGT_SHIFT) &
+		ID_AA64MMFR0_EL1_FGT_MASK) != 0U);
+}
+
+static inline bool is_armv8_9_fgt2_present(void)
+{
 	return ((read_id_aa64mmfr0_el1() >> ID_AA64MMFR0_EL1_FGT_SHIFT) &
-		ID_AA64MMFR0_EL1_FGT_MASK) == ID_AA64MMFR0_EL1_FGT_SUPPORTED;
+		ID_AA64MMFR0_EL1_FGT_MASK) == ID_AA64MMFR0_EL1_FGT2_SUPPORTED;
 }
 
 static inline unsigned long int get_armv8_6_ecv_support(void)
@@ -419,4 +439,10 @@
 		!= ID_AA64MMFR1_EL1_LOR_NOT_SUPPORTED;
 }
 
+static inline unsigned int get_feat_ls64_support(void)
+{
+	return ((read_id_aa64isar1_el1() >> ID_AA64ISAR1_LS64_SHIFT) &
+		ID_AA64ISAR1_LS64_MASK);
+}
+
 #endif /* ARCH_FEATURES_H */
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 4b9c33e..076a9cd 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -572,6 +572,13 @@
 DEFINE_RENAME_SYSREG_RW_FUNCS(hdfgrtr_el2, HDFGRTR_EL2)
 DEFINE_RENAME_SYSREG_RW_FUNCS(hdfgwtr_el2, HDFGWTR_EL2)
 
+/* Armv8.9 Fine Grained Virtualization Traps 2 Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(hfgrtr2_el2,  HFGRTR2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(hfgwtr2_el2,  HFGWTR2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(hfgitr2_el2,  HFGITR2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(hdfgrtr2_el2, HDFGRTR2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(hdfgwtr2_el2, HDFGWTR2_EL2)
+
 /* Armv8.6 Enhanced Counter Virtualization Register */
 DEFINE_RENAME_SYSREG_RW_FUNCS(cntpoff_el2,  CNTPOFF_EL2)
 
@@ -611,6 +618,8 @@
 DEFINE_RENAME_SYSREG_RW_FUNCS(trcclaimclr, TRCCLAIMCLR)
 DEFINE_RENAME_SYSREG_READ_FUNC(trcdevarch, TRCDEVARCH)
 
+DEFINE_RENAME_SYSREG_READ_FUNC(mdselr_el1, MDSELR_EL1)
+
 /* FEAT_HCX HCRX_EL2 */
 DEFINE_RENAME_SYSREG_RW_FUNCS(hcrx_el2, HCRX_EL2)
 
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
index d5950ca..3ce053d 100644
--- a/include/lib/extensions/amu.h
+++ b/include/lib/extensions/amu.h
@@ -71,7 +71,11 @@
 
 #if AMU_GROUP1_NR_COUNTERS
 uint64_t amu_group1_cnt_read(unsigned int idx);
+uint64_t amu_group1_num_counters(void);
+uint64_t amu_group1_evtype_read(unsigned int idx);
+void amu_group1_evtype_write(unsigned int idx, uint64_t val);
 #if __aarch64__
+uint64_t amu_group1_is_counter_implemented(unsigned int idx);
 uint64_t amu_group1_voffset_read(unsigned int idx);
 void amu_group1_voffset_write(unsigned int idx, uint64_t val);
 #endif
diff --git a/include/lib/extensions/amu_private.h b/include/lib/extensions/amu_private.h
index 7ae17d9..b98178f 100644
--- a/include/lib/extensions/amu_private.h
+++ b/include/lib/extensions/amu_private.h
@@ -11,6 +11,10 @@
 
 uint64_t amu_group0_cnt_read_internal(unsigned int idx);
 uint64_t amu_group1_cnt_read_internal(unsigned int idx);
+uint64_t amu_group1_num_counters_internal(void);
+uint64_t amu_group1_is_cnt_impl_internal(unsigned int idx);
+void amu_group1_evtype_write_internal(unsigned int idx, uint64_t val);
+uint64_t amu_group1_evtype_read_internal(unsigned int idx);
 
 #if __aarch64__
 uint64_t amu_group0_voffset_read_internal(unsigned int idx);
diff --git a/include/lib/extensions/sme.h b/include/lib/extensions/sme.h
index 4a7e9b7..7175b71 100644
--- a/include/lib/extensions/sme.h
+++ b/include/lib/extensions/sme.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -13,6 +13,9 @@
 #define MAX_VL_B		(MAX_VL / 8)
 #define SME_SVQ_ARCH_MAX	(MASK(SMCR_ELX_LEN) >> SMCR_ELX_LEN_SHIFT)
 
+/* convert SME SVL in bytes to SVQ */
+#define SME_SVL_TO_SVQ(svl_bytes)		(((svl_bytes) >> 4U) - 1U)
+
 /* get a random Streaming SVE VQ b/w 0 to SME_SVQ_ARCH_MAX */
 #define SME_GET_RANDOM_SVQ	(rand() % (SME_SVQ_ARCH_MAX + 1))
 
@@ -31,6 +34,7 @@
 /* SME feature related prototypes. */
 void sme_smstart(smestart_instruction_type_t smstart_type);
 void sme_smstop(smestop_instruction_type_t smstop_type);
+uint32_t sme_probe_svl(uint8_t sme_max_svq);
 
 /* Assembly function prototypes. */
 uint64_t sme_rdsvl_1(void);
diff --git a/include/lib/transfer_list.h b/include/lib/transfer_list.h
index 8bf16cf..f1e4181 100644
--- a/include/lib/transfer_list.h
+++ b/include/lib/transfer_list.h
@@ -24,7 +24,21 @@
 
 // version of the register convention used.
 // Set to 1 for both AArch64 and AArch32 according to fw handoff spec v0.9
-#define REGISTER_CONVENTION_VERSION_MASK (1 << 24)
+#define REGISTER_CONVENTION_VERSION_SHIFT_64	UL(32)
+#define REGISTER_CONVENTION_VERSION_SHIFT_32	UL(24)
+#define REGISTER_CONVENTION_VERSION_MASK	UL(0xff)
+
+#define TRANSFER_LIST_HANDOFF_X1_VALUE(__version) 	\
+	((TRANSFER_LIST_SIGNATURE &	\
+	((1UL << REGISTER_CONVENTION_VERSION_SHIFT_64) - 1)) | 	\
+	(((__version) & REGISTER_CONVENTION_VERSION_MASK) <<	\
+	 REGISTER_CONVENTION_VERSION_SHIFT_64))
+
+#define TRANSFER_LIST_HANDOFF_R1_VALUE(__version) 	\
+	((TRANSFER_LIST_SIGNATURE &	\
+	((1UL << REGISTER_CONVENTION_VERSION_SHIFT_32) - 1)) | 	\
+	(((__version) & REGISTER_CONVENTION_VERSION_MASK) <<	\
+	 REGISTER_CONVENTION_VERSION_SHIFT_32))
 
 #ifndef __ASSEMBLER__
 
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 0a1e653..491edde 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -75,11 +75,58 @@
 uint64_t amu_group1_cnt_read(unsigned int idx)
 {
 	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
-	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_group1_supported());
+	assert(idx < amu_group1_num_counters());
 
 	return amu_group1_cnt_read_internal(idx);
 }
 
+/* Return the number of counters available for group 1 */
+uint64_t amu_group1_num_counters(void)
+{
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+	assert(amu_group1_supported());
+
+	uint64_t num_counters = amu_group1_num_counters_internal();
+	if (num_counters < AMU_GROUP1_NR_COUNTERS) {
+		return num_counters;
+	}
+	return AMU_GROUP1_NR_COUNTERS;
+}
+
+/* Return the event type for the group 1 counter with index `idx`. */
+uint64_t amu_group1_evtype_read(unsigned int idx)
+{
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+	assert(amu_group1_supported());
+	assert(idx < amu_group1_num_counters());
+
+	return amu_group1_evtype_read_internal(idx);
+}
+
+/* Set the type for group 1 counter with index `idx`. */
+void amu_group1_evtype_write(unsigned int idx, uint64_t val)
+{
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+	assert(amu_group1_supported());
+	assert(idx < amu_group1_num_counters());
+
+	amu_group1_evtype_write_internal(idx, val);
+}
+
+/*
+ * Return whether group 1 counter at index `idx` is implemented.
+ *
+ * Using this function requires v8.6 FEAT_AMUv1p1 support.
+ */
+uint64_t amu_group1_is_counter_implemented(unsigned int idx)
+{
+	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(amu_group1_supported());
+
+	return amu_group1_is_cnt_impl_internal(idx);
+}
+
 /*
  * Read the group 1 offset register for a given index.
  *
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
index b15daf3..1f2ae5c 100644
--- a/lib/extensions/amu/aarch64/amu_helpers.S
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -10,6 +10,10 @@
 
 	.globl	amu_group0_cnt_read_internal
 	.globl	amu_group1_cnt_read_internal
+	.globl	amu_group1_evtype_write_internal
+	.globl	amu_group1_evtype_read_internal
+	.globl	amu_group1_num_counters_internal
+	.globl	amu_group1_is_cnt_impl_internal
 
 	/* FEAT_AMUv1p1 virtualisation offset register functions */
 	.globl	amu_group0_voffset_read_internal
@@ -258,3 +262,127 @@
 	write	AMEVCNTVOFF1E_EL2	/* index 14 */
 	write	AMEVCNTVOFF1F_EL2	/* index 15 */
 endfunc amu_group1_voffset_write_internal
+
+/*
+ * uint64_t amu_group1_evtype_read_internal(unsigned int idx);
+ *
+ * Given `idx`, read the corresponding AMU event type register
+ * and return it in `x0`.
+ */
+func amu_group1_evtype_read_internal
+	adr	x1, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index.  Ensure `idx` is valid.
+	 */
+	tst	x0, #~0xF
+	ASM_ASSERT(eq)
+#endif
+	/*
+	 * Given `idx` calculate address of mrs/ret instruction pair
+	 * in the table below.
+	 */
+	add	x1, x1, x0, lsl #3	/* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x1, x1, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x1
+
+1:	read	AMEVTYPER10_EL0	/* index 0 */
+	read	AMEVTYPER11_EL0	/* index 1 */
+	read	AMEVTYPER12_EL0	/* index 2 */
+	read	AMEVTYPER13_EL0	/* index 3 */
+	read	AMEVTYPER14_EL0	/* index 4 */
+	read	AMEVTYPER15_EL0	/* index 5 */
+	read	AMEVTYPER16_EL0	/* index 6 */
+	read	AMEVTYPER17_EL0	/* index 7 */
+	read	AMEVTYPER18_EL0	/* index 8 */
+	read	AMEVTYPER19_EL0	/* index 9 */
+	read	AMEVTYPER1A_EL0	/* index 10 */
+	read	AMEVTYPER1B_EL0	/* index 11 */
+	read	AMEVTYPER1C_EL0	/* index 12 */
+	read	AMEVTYPER1D_EL0	/* index 13 */
+	read	AMEVTYPER1E_EL0	/* index 14 */
+	read	AMEVTYPER1F_EL0	/* index 15 */
+endfunc amu_group1_evtype_read_internal
+
+/*
+ * void amu_group1_evtype_write_internal(unsigned int idx, uint64_t val);
+ *
+ * Program the AMU event type register indexed by `idx`
+ * with the value `val`.
+ */
+func amu_group1_evtype_write_internal
+	adr	x2, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index.  Ensure `idx` is valid.
+	 */
+	tst	x0, #~0xF
+	ASM_ASSERT(eq)
+
+	/* val should be between [0, 65535] */
+	tst	x1, #~0xFFFF
+	ASM_ASSERT(eq)
+#endif
+	/*
+	 * Given `idx` calculate address of msr/ret instruction pair
+	 * in the table below.
+	 */
+	add	x2, x2, x0, lsl #3	/* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x2, x2, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x2
+
+1:	write	AMEVTYPER10_EL0		/* index 0 */
+	write	AMEVTYPER11_EL0		/* index 1 */
+	write	AMEVTYPER12_EL0		/* index 2 */
+	write	AMEVTYPER13_EL0		/* index 3 */
+	write	AMEVTYPER14_EL0		/* index 4 */
+	write	AMEVTYPER15_EL0		/* index 5 */
+	write	AMEVTYPER16_EL0		/* index 6 */
+	write	AMEVTYPER17_EL0		/* index 7 */
+	write	AMEVTYPER18_EL0		/* index 8 */
+	write	AMEVTYPER19_EL0		/* index 9 */
+	write	AMEVTYPER1A_EL0		/* index 10 */
+	write	AMEVTYPER1B_EL0		/* index 11 */
+	write	AMEVTYPER1C_EL0		/* index 12 */
+	write	AMEVTYPER1D_EL0		/* index 13 */
+	write	AMEVTYPER1E_EL0		/* index 14 */
+	write	AMEVTYPER1F_EL0		/* index 15 */
+endfunc amu_group1_evtype_write_internal
+
+/*
+ * uint64_t amu_group1_num_counters_internal(void);
+ *
+ * Return the number of group 1 counters implemented (AMCGCR_EL0.CG1NC).
+ */
+func amu_group1_num_counters_internal
+	mrs	x0, AMCGCR_EL0
+	ubfx	x0, x0, AMCGCR_EL0_CG1NC_SHIFT, AMCGCR_EL0_CG1NC_LENGTH
+	ret
+endfunc amu_group1_num_counters_internal
+
+/*
+ * uint64_t amu_group1_is_cnt_impl_internal(unsigned int idx);
+ *
+ * Given `idx`, return whether group 1 counter `idx` is implemented or not.
+ */
+func amu_group1_is_cnt_impl_internal
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index.  Ensure `idx` is valid.
+	 */
+	tst	x0, #~0xF
+	ASM_ASSERT(eq)
+#endif
+	mrs	x1, AMCG1IDR_EL0
+	mov	x2, #1
+	lsl	x0, x2, x0
+	and	x0, x1, x0
+	ret
+endfunc amu_group1_is_cnt_impl_internal
diff --git a/lib/extensions/sme/aarch64/sme.c b/lib/extensions/sme/aarch64/sme.c
index ee21578..a7337ee 100644
--- a/lib/extensions/sme/aarch64/sme.c
+++ b/lib/extensions/sme/aarch64/sme.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021-2024, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -91,7 +91,7 @@
 {
 	u_register_t smcr_el2_val;
 
-	/* cap svq to arch supported max value */
+	/* Cap svq to arch supported max value. */
 	if (svq > SME_SVQ_ARCH_MAX) {
 		svq = SME_SVQ_ARCH_MAX;
 	}
@@ -145,3 +145,25 @@
 {
 	return ((read_smcr_el2() & SMCR_ELX_FA64_BIT) != 0U);
 }
+
+uint32_t sme_probe_svl(uint8_t sme_max_svq)
+{
+	uint32_t svl_bitmap = 0;
+	uint8_t svq, rdsvl_vq;
+
+	/* Cap svq to arch supported max value. */
+	if (sme_max_svq > SME_SVQ_ARCH_MAX) {
+		sme_max_svq = SME_SVQ_ARCH_MAX;
+	}
+
+	for (svq = 0; svq <= sme_max_svq; svq++) {
+		sme_config_svq(svq);
+		rdsvl_vq = SME_SVL_TO_SVQ(sme_rdsvl_1());
+		if (svl_bitmap & BIT_32(rdsvl_vq)) {
+			continue;
+		}
+		svl_bitmap |= BIT_32(rdsvl_vq);
+	}
+
+	return svl_bitmap;
+}
diff --git a/plat/arm/fvp/fvp_def.h b/plat/arm/fvp/fvp_def.h
index bcd3a7c..1d01bb2 100644
--- a/plat/arm/fvp/fvp_def.h
+++ b/plat/arm/fvp/fvp_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -28,11 +28,19 @@
  * FVP memory map related constants
  ******************************************************************************/
 
-#define DEVICE0_BASE		0x1a000000
-#define DEVICE0_SIZE		0x12200000
+#define DEVICE0_BASE			0x1a000000
+#define DEVICE0_SIZE			0x12200000
 
-#define DEVICE1_BASE		0x2f000000
-#define DEVICE1_SIZE		0x400000
+#define DEVICE1_BASE			0x2f000000
+#define DEVICE1_SIZE			0x400000
+
+/**
+ * NOTE: LS64_ATOMIC_DEVICE Memory Region (0x1d000000 - 0x1d00ffff) has been
+ * configured within the FVP to support only st64b/ld64b instructions.
+ * ldr/str instructions cannot be used to access this memory.
+ */
+#define LS64_ATOMIC_DEVICE_BASE		0x1d000000
+#define LS64_ATOMIC_DEVICE_SIZE		0x10000
 
 /*******************************************************************************
  * GIC-400 & interrupt handling related constants
diff --git a/plat/arm/fvp/plat_setup.c b/plat/arm/fvp/plat_setup.c
index fa97814..cb6f874 100644
--- a/plat/arm/fvp/plat_setup.c
+++ b/plat/arm/fvp/plat_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -30,6 +30,8 @@
 static const mmap_region_t mmap[] = {
 	MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW | MT_NS),
 	MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_NS),
+	MAP_REGION_FLAT(LS64_ATOMIC_DEVICE_BASE, LS64_ATOMIC_DEVICE_SIZE,
+			MT_DEVICE | MT_RW | MT_NS),
 #if USE_NVM
 	MAP_REGION_FLAT(FLASH_BASE, FLASH_SIZE, MT_DEVICE | MT_RW | MT_NS),
 #endif
diff --git a/plat/arm/tc/platform.mk b/plat/arm/tc/platform.mk
index cec047c..14db89b 100644
--- a/plat/arm/tc/platform.mk
+++ b/plat/arm/tc/platform.mk
@@ -17,6 +17,8 @@
 $(eval $(call add_define,NS_BL1U_DEFINES,TC_MAX_PE_PER_CPU))
 $(eval $(call add_define,NS_BL2U_DEFINES,TC_MAX_PE_PER_CPU))
 
+$(eval $(call add_define,TFTF_DEFINES,TARGET_PLATFORM))
+
 PLAT_INCLUDES	+=	-Iplat/arm/tc/include/
 
 PLAT_SOURCES	:=	drivers/arm/gic/arm_gic_v2v3.c		\
diff --git a/plat/xilinx/versal_net/tests_to_skip.txt b/plat/xilinx/versal_net/tests_to_skip.txt
index d5c3a39..80e7cb2 100644
--- a/plat/xilinx/versal_net/tests_to_skip.txt
+++ b/plat/xilinx/versal_net/tests_to_skip.txt
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+# Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -38,7 +38,7 @@
 FF-A Setup and Discovery/FF-A RXTX unmap SP rxtx buffer
 FF-A Setup and Discovery/Test FFA_PARTITION_INFO_GET v1.0
 FF-A Memory Sharing/Lend memory, clear flag set
-SIMD,SVE Registers context/Check that SIMD registers context is preserved
+SIMD context switch tests
 FF-A Interrupt
 FF-A Notifications
 
diff --git a/plat/xilinx/zynqmp/tests_to_skip.txt b/plat/xilinx/zynqmp/tests_to_skip.txt
index 9c32ae2..271fdc8 100644
--- a/plat/xilinx/zynqmp/tests_to_skip.txt
+++ b/plat/xilinx/zynqmp/tests_to_skip.txt
@@ -53,7 +53,7 @@
 EL3 power state parser validation
 
 #TESTS: SIMD
-SIMD,SVE Registers context/Check that SIMD registers context is preserved
+SIMD context switch tests
 
 #TESTS: psci-extensive
 PSCI CPU ON OFF Stress Tests/Repeated shutdown of all cores to stress test CPU_ON, CPU_SUSPEND and CPU_OFF
diff --git a/spm/cactus/cactus.mk b/spm/cactus/cactus.mk
index 0755749..9557745 100644
--- a/spm/cactus/cactus.mk
+++ b/spm/cactus/cactus.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2024, Arm Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -46,6 +46,7 @@
 	)						\
 	$(addprefix spm/common/sp_tests/,		\
 		sp_test_ffa.c				\
+		sp_test_cpu.c				\
 	)						\
 	$(addprefix spm/cactus/cactus_tests/,		\
 		cactus_message_loop.c			\
diff --git a/spm/cactus/cactus_main.c b/spm/cactus/cactus_main.c
index b39f138..1d5cd97 100644
--- a/spm/cactus/cactus_main.c
+++ b/spm/cactus/cactus_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -327,8 +327,9 @@
 	discover_managed_exit_interrupt_id();
 	register_maintenance_interrupt_handlers();
 
-	/* Invoking Tests */
+	/* Invoking self tests */
 	ffa_tests(&mb);
+	cpu_feature_tests();
 
 msg_loop:
 	/* End up to message loop */
diff --git a/spm/common/sp_helpers.h b/spm/common/sp_helpers.h
index e0e749d..776cc75 100644
--- a/spm/common/sp_helpers.h
+++ b/spm/common/sp_helpers.h
@@ -8,6 +8,8 @@
 #define SP_HELPERS_H
 
 #include <stdint.h>
+
+#include <debug.h>
 #include <tftf_lib.h>
 #include <spm_common.h>
 #include <spinlock.h>
diff --git a/spm/common/sp_tests/sp_test_cpu.c b/spm/common/sp_tests/sp_test_cpu.c
new file mode 100644
index 0000000..a05dbf3
--- /dev/null
+++ b/spm/common/sp_tests/sp_test_cpu.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sp_helpers.h>
+
+#include <arch_features.h>
+
+static void cpu_check_id_regs(void)
+{
+	/* ID_AA64PFR0_EL1 */
+	EXPECT(is_feat_advsimd_present(), true);
+	EXPECT(is_feat_fp_present(), true);
+	EXPECT(is_armv8_2_sve_present(), false);
+
+	/* ID_AA64PFR1_EL1 */
+	EXPECT(is_feat_sme_supported(), false);
+}
+
+void cpu_feature_tests(void)
+{
+	const char *test_cpu_str = "CPU tests";
+
+	announce_test_section_start(test_cpu_str);
+	cpu_check_id_regs();
+	announce_test_section_end(test_cpu_str);
+}
diff --git a/spm/common/sp_tests/sp_test_ffa.c b/spm/common/sp_tests/sp_test_ffa.c
index c3774f9..ba63a0e 100644
--- a/spm/common/sp_tests/sp_test_ffa.c
+++ b/spm/common/sp_tests/sp_test_ffa.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -195,7 +195,7 @@
 	ffa_partition_info_wrong_test();
 }
 
-void ffa_version_test(void)
+static void ffa_version_test(void)
 {
 	struct ffa_value ret = ffa_version(FFA_VERSION_COMPILED);
 
@@ -212,7 +212,7 @@
 	EXPECT((int)compatible, (int)true);
 }
 
-void ffa_spm_id_get_test(void)
+static void ffa_spm_id_get_test(void)
 {
 	if (spm_version >= FFA_VERSION_1_1) {
 		struct ffa_value ret = ffa_spm_id_get();
@@ -236,9 +236,9 @@
 
 void ffa_tests(struct mailbox_buffers *mb)
 {
-	const char *test_ffa = "FF-A setup and discovery";
+	const char *test_ffa_str = "FF-A setup and discovery";
 
-	announce_test_section_start(test_ffa);
+	announce_test_section_start(test_ffa_str);
 
 	ffa_features_test();
 	ffa_version_test();
@@ -246,5 +246,5 @@
 	ffa_partition_info_get_test(mb);
 	ffa_partition_info_get_regs_test();
 
-	announce_test_section_end(test_ffa);
+	announce_test_section_end(test_ffa_str);
 }
diff --git a/spm/common/sp_tests/sp_tests.h b/spm/common/sp_tests/sp_tests.h
index 10d3b9b..007c2ca 100644
--- a/spm/common/sp_tests/sp_tests.h
+++ b/spm/common/sp_tests/sp_tests.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,9 +10,10 @@
 #include <spm_common.h>
 
 /*
- * Test functions
+ * Self test functions
  */
 
 void ffa_tests(struct mailbox_buffers *mb);
+void cpu_feature_tests(void);
 
 #endif /* CACTUS_TESTS_H */
diff --git a/tftf/tests/aarch32_tests_to_skip.txt b/tftf/tests/aarch32_tests_to_skip.txt
index 6913cb1..f05235d 100644
--- a/tftf/tests/aarch32_tests_to_skip.txt
+++ b/tftf/tests/aarch32_tests_to_skip.txt
@@ -1,10 +1,9 @@
 #
-# Copyright (c) 2023, Arm Limited. All rights reserved.
+# Copyright (c) 2023-2024, Arm Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
 Realm payload at EL1
-SIMD,SVE Registers context
 Invalid memory access with RME extension
 FF-A Setup and Discovery
 SP exceptions
@@ -12,7 +11,7 @@
 FF-A Group0 interrupts
 FF-A Power management
 FF-A Memory Sharing
-SIMD,SVE Registers context
+SIMD context switch tests
 FF-A Interrupt
 SMMUv3 tests
 FF-A Notifications
diff --git a/tftf/tests/extensions/amu/test_amu.c b/tftf/tests/extensions/amu/test_amu.c
index 8d5c92b..21305a7 100644
--- a/tftf/tests/extensions/amu/test_amu.c
+++ b/tftf/tests/extensions/amu/test_amu.c
@@ -16,7 +16,8 @@
 #include <tftf_lib.h>
 #include <timer.h>
 
-#define SUSPEND_TIME_1_SEC	1000
+#define SUSPEND_TIME_1_SEC		1000
+#define MAX_MPMM_TEST_ITERATIONS 	100000U
 
 static volatile int wakeup_irq_received[PLATFORM_CORE_COUNT];
 
@@ -242,3 +243,53 @@
 
 	return TEST_RESULT_SUCCESS;
 }
+
+/*
+ * Check group 1 counter behaviour at ELs lower than EL3 w.r.t. AMCR.CG1RZ.
+ * NOTE(review): the loop below FAILs when a counter did NOT advance, which
+ */
+test_result_t test_amu_group1_raz(void)
+{
+/* Test on TC2 only, as MPMM not implemented in other platforms yet */
+#if PLAT_tc && (TARGET_PLATFORM == 2)
+	uint64_t counters_initial[AMU_GROUP1_NR_COUNTERS] = {0};
+	uint64_t counters_final[AMU_GROUP1_NR_COUNTERS] = {0};
+
+	for (unsigned int i = 0; i < amu_group1_num_counters(); i++) {
+		INFO("AMUEVTYPER1%x: 0x%llx\n", i, amu_group1_evtype_read(i));
+		counters_initial[i] = amu_group1_cnt_read(i);
+	}
+
+	for (int i = 0; i < MAX_MPMM_TEST_ITERATIONS; i++) {
+		/* Instruction with activity count 1 */
+		__asm__ volatile("fmov	d0,xzr");
+		__asm__ volatile("fmov	d1,xzr");
+		__asm__ volatile("fmul	d2,d0,d1");
+		__asm__ volatile("fmov	d2,xzr");
+
+		__asm__ volatile("fmov	d0,xzr");
+		__asm__ volatile("fmov	d1,xzr");
+		__asm__ volatile("fmov	d2,xzr");
+		__asm__ volatile("fmadd	d3,d2,d1,d0");
+
+		/* Instruction with activity count 2 */
+		__asm__ volatile("ptrue	p0.s, ALL");
+		__asm__ volatile("index	z10.s, #10,13");
+		__asm__ volatile("index	z11.s, #12,7");
+		__asm__ volatile("ucvtf	v10.4s, v10.4s");
+		__asm__ volatile("ucvtf	v11.4s, v11.4s");
+		__asm__ volatile("fadd	v0.4s, v10.4s, v11.4s");
+	}
+
+	for (unsigned int i = 0; i < amu_group1_num_counters(); i++) {
+		counters_final[i] = amu_group1_cnt_read(i);
+		if (counters_final[i] == counters_initial[i]) {
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	return TEST_RESULT_SUCCESS;
+#else
+	return TEST_RESULT_SKIPPED;
+#endif /* PLAT_tc && (TARGET_PLATFORM == 2) */
+}
diff --git a/tftf/tests/extensions/debugv8p9/test_debugv8p9.c b/tftf/tests/extensions/debugv8p9/test_debugv8p9.c
new file mode 100644
index 0000000..033aedb
--- /dev/null
+++ b/tftf/tests/extensions/debugv8p9/test_debugv8p9.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <debug.h>
+#include <arch_helpers.h>
+
+test_result_t test_debugv8p9_enabled(void)
+{
+	SKIP_TEST_IF_AARCH32();
+
+#if __aarch64__
+	SKIP_TEST_IF_DEBUGV8P9_NOT_SUPPORTED();
+
+	read_mdselr_el1();
+#endif
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/extensions/fgt/test_fgt.c b/tftf/tests/extensions/fgt/test_fgt.c
index 5d9600d..e38f6c7 100644
--- a/tftf/tests/extensions/fgt/test_fgt.c
+++ b/tftf/tests/extensions/fgt/test_fgt.c
@@ -132,3 +132,25 @@
 	return TEST_RESULT_SUCCESS;
 #endif	/* __aarch64__ */
 }
+
+test_result_t test_fgt2_enabled(void)
+{
+	SKIP_TEST_IF_AARCH32();
+
+
+#ifdef __aarch64__
+	SKIP_TEST_IF_FGT2_NOT_SUPPORTED();
+
+	/* The following registers are read to test their presence when
+	 * FEAT_FGT2 is supported
+	 */
+
+	read_hfgitr2_el2();
+	read_hfgrtr2_el2();
+	read_hfgwtr2_el2();
+	read_hdfgrtr2_el2();
+	read_hdfgwtr2_el2();
+
+	return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/ls64/ls64_operations.S b/tftf/tests/extensions/ls64/ls64_operations.S
new file mode 100644
index 0000000..18f7a5f
--- /dev/null
+++ b/tftf/tests/extensions/ls64/ls64_operations.S
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+#if __aarch64__
+
+        .arch armv8.7-a
+	.globl	ls64_store
+	.globl	ls64_load
+
+/*
+ * Function to store 64 bytes of data from consecutive registers into a memory
+ * location in single-copy atomic operation via st64b instruction.
+ *
+ * x0:     Holds the base address of the input array of 8 64-bit integers.
+ * x1:     Holds the base address of the destination/output array of 8 64-bit
+ *         integers, where st64b does the single-copy atomic 64-byte store.
+ * x8-x15: Consecutive registers loaded with input array.
+ *
+ */
+func ls64_store
+	ldp	x8, x9, [x0, #0]	/* x0: Base address of Input Array */
+	ldp	x10, x11, [x0, #16]
+	ldp	x12, x13, [x0, #32]
+	ldp	x14, x15, [x0, #48]
+	st64b	x8, [x1]		/* x1: Address where 64-byte data to be stored */
+	ret
+endfunc ls64_store
+
+/*
+ * Function to load 64 bytes of data from a memory location into eight
+ * consecutive 64-bit registers in a single-copy atomic operation via ld64b.
+ *
+ * x0: Holds the address of memory from where 64-byte of data to be loaded.
+ * x1: Holds the base address of the destination/output array of 8 64-bit integers.
+ * x4-x11: consecutive registers into which data will be copied with ld64b inst.
+ */
+
+func ls64_load
+	ld64b	x4, [x0]
+	stp	x4, x5, [x1, #0]	/* Base address of destination buffer */
+	stp	x6, x7, [x1, #16]
+	stp	x8, x9, [x1, #32]
+	stp	x10, x11, [x1, #48]
+	ret
+endfunc ls64_load
+
+#endif /* __aarch64__ */
diff --git a/tftf/tests/extensions/ls64/test_ls64.c b/tftf/tests/extensions/ls64/test_ls64.c
new file mode 100644
index 0000000..b7074ab
--- /dev/null
+++ b/tftf/tests/extensions/ls64/test_ls64.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "./test_ls64.h"
+#include <test_helpers.h>
+
+/*
+ * @brief Test LS64 feature support when the extension is enabled.
+ *
+ * Execute the LS64 instructions:
+ * LD64B   -  single-copy atomic 64-byte load.
+ * ST64B   -  single-copy atomic 64-byte store without return.
+ *
+ * These instructions should not be trapped to EL3, when EL2 access them.
+ *
+ * @return test_result_t
+ */
+test_result_t test_ls64_instructions(void)
+{
+#if PLAT_fvp
+#ifdef __aarch64__
+
+	/* Make sure FEAT_LS64 is supported. */
+	SKIP_TEST_IF_LS64_NOT_SUPPORTED();
+
+	uint64_t ls64_input_buffer[LS64_ARRAYSIZE] = {1, 2, 3, 4, 5, 6, 7, 8};
+	uint64_t ls64_output_buffer[LS64_ARRAYSIZE] = {0};
+	/*
+	 * Address where the data will be written to/read from with instructions
+	 * st64b and ld64b respectively.
+	 * Can only be in range (0x1d000000 - 0x1d00ffff) and be 64-byte aligned.
+	 */
+	uint64_t *store_address = (uint64_t *)LS64_ATOMIC_DEVICE_BASE;
+
+	/**
+	 * FEAT_LS64 : Execute LD64B and ST64B Instructions.
+	 * This test copies data from input buffer, an array of 8-64bit
+	 * unsigned integers to an output buffer via LD64B and ST64B
+	 * atomic operation instructions.
+	 *
+	 * NOTE: As we cannot pre-write into LS64_ATOMIC_DEVICE_BASE memory
+	 * via other instructions, we first load the data from a normal
+	 * input buffer into the consecutive registers and then copy them in one
+	 * atomic operation via st64b to Device memory(LS64_ATOMIC_DEVICE_BASE).
+	 * Further we load the data from the same device memory into a normal
+	 * output buffer through general registers and verify the buffers to
+	 * ensure instructions copied the data as per the architecture.
+	 */
+
+	ls64_store(ls64_input_buffer, store_address);
+	ls64_load(store_address, ls64_output_buffer);
+
+	for (uint8_t i = 0U; i < LS64_ARRAYSIZE; i++) {
+		VERBOSE("Input Buffer[%u]=%llu\n", i, ls64_input_buffer[i]);
+		VERBOSE("Output Buffer[%u]=%llu\n", i, ls64_output_buffer[i]);
+
+		if (ls64_input_buffer[i] != ls64_output_buffer[i]) {
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	return TEST_RESULT_SUCCESS;
+#else
+	/* Skip test if AArch32 */
+	SKIP_TEST_IF_AARCH32();
+#endif /* __aarch64__ */
+#else
+	tftf_testcase_printf("Test supported only on FVP \n");
+	return TEST_RESULT_SKIPPED;
+#endif /* PLAT_fvp */
+
+}
diff --git a/tftf/tests/extensions/ls64/test_ls64.h b/tftf/tests/extensions/ls64/test_ls64.h
new file mode 100644
index 0000000..35a00c2
--- /dev/null
+++ b/tftf/tests/extensions/ls64/test_ls64.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_LS64_H
+#define TEST_LS64_H
+
+#include <stdint.h>
+
+#define LS64_ARRAYSIZE 8
+
+void ls64_store(uint64_t *input_buffer, uint64_t *store_address);
+void ls64_load(uint64_t *store_address, uint64_t *output_buffer);
+
+#endif /* TEST_LS64_H */
diff --git a/tftf/tests/misc_tests/test_firmware_handoff.c b/tftf/tests/misc_tests/test_firmware_handoff.c
index bd565ae..00cebd3 100644
--- a/tftf/tests/misc_tests/test_firmware_handoff.c
+++ b/tftf/tests/misc_tests/test_firmware_handoff.c
@@ -20,8 +20,8 @@
 {
 	struct transfer_list_header *tl = (struct transfer_list_header *)ns_tl;
 
-	assert((uint32_t)tl_signature ==
-	       (REGISTER_CONVENTION_VERSION_MASK | TRANSFER_LIST_SIGNATURE));
+	assert(tl_signature ==
+		TRANSFER_LIST_HANDOFF_X1_VALUE(TRANSFER_LIST_VERSION));
 
 	if (transfer_list_check_header(tl) == TL_OPS_NON) {
 		return TEST_RESULT_FAIL;
diff --git a/tftf/tests/runtime_services/secure_service/test_spm_simd.c b/tftf/tests/runtime_services/secure_service/test_spm_simd.c
index cfc931f..baec1ac 100644
--- a/tftf/tests/runtime_services/secure_service/test_spm_simd.c
+++ b/tftf/tests/runtime_services/secure_service/test_spm_simd.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,6 +21,10 @@
 
 static sve_z_regs_t sve_vectors_input;
 static sve_z_regs_t sve_vectors_output;
+static sve_p_regs_t sve_predicates_input;
+static sve_p_regs_t sve_predicates_output;
+static sve_ffr_regs_t sve_ffr_input;
+static sve_ffr_regs_t sve_ffr_output;
 static int sve_op_1[NS_SVE_OP_ARRAYSIZE];
 static int sve_op_2[NS_SVE_OP_ARRAYSIZE];
 static fpu_state_t g_fpu_state_write;
@@ -38,7 +42,7 @@
 	/**********************************************************************
 	 * Verify that FF-A is there and that it has the correct version.
 	 **********************************************************************/
-	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
 
 	fpu_state_write_rand(&g_fpu_state_write);
 	struct ffa_value ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
@@ -70,23 +74,13 @@
 	return TEST_RESULT_SUCCESS;
 }
 
-/*
- * Tests that SVE vectors are preserved during the context switches between
- * normal world and the secure world.
- * Fills the SVE vectors with known values, requests SP to fill the vectors
- * with a different values, checks that the context is restored on return.
- */
-test_result_t test_sve_vectors_preserved(void)
+static test_result_t test_sve_vectors_preserved_impl(uint8_t vq)
 {
-	uint64_t vl;
 	uint8_t *sve_vector;
+	uint64_t vl;
 
-	SKIP_TEST_IF_SVE_NOT_SUPPORTED();
-
-	/**********************************************************************
-	 * Verify that FF-A is there and that it has the correct version.
-	 **********************************************************************/
-	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+	/* Configure requested VL. */
+	sve_config_vq(vq);
 
 	/*
 	 * Clear SVE vectors buffers used to compare the SVE state before calling
@@ -95,15 +89,12 @@
 	memset(sve_vectors_input, 0, sizeof(sve_vectors_input));
 	memset(sve_vectors_output, 0, sizeof(sve_vectors_output));
 
-	/* Set ZCR_EL2.LEN to implemented VL (constrained by EL3). */
-	write_zcr_el2(0xf);
-	isb();
-
-	/* Get the implemented VL. */
-	vl = sve_rdvl_1();
+	/* Vector length in bytes from vq. */
+	vl = SVE_VQ_TO_BYTES(vq);
 
 	/* Fill each vector for the VL size with a fixed pattern. */
 	sve_vector = (uint8_t *) sve_vectors_input;
+
 	for (uint32_t vector_num = 0U; vector_num < SVE_NUM_VECTORS; vector_num++) {
 		memset(sve_vector, 0x11 * (vector_num + 1), vl);
 		sve_vector += vl;
@@ -126,6 +117,11 @@
 		return TEST_RESULT_FAIL;
 	}
 
+	/* Check ZCR_EL2 was preserved. */
+	if (sve_read_zcr_elx() != vq) {
+		return TEST_RESULT_FAIL;
+	}
+
 	/* Get the SVE vectors state after returning to normal world. */
 	sve_z_regs_read(&sve_vectors_output);
 
@@ -137,13 +133,59 @@
 	return TEST_RESULT_SUCCESS;
 }
 
+static test_result_t helper_test_sve(test_result_t (*func)(uint8_t vq))
+{
+	uint32_t bitmap, vl_bitmap;
+	uint32_t vq = 0;
+	test_result_t ret;
+
+	SKIP_TEST_IF_SVE_NOT_SUPPORTED();
+
+	/**********************************************************************
+	 * Verify that FF-A is there and that it has the correct version.
+	 **********************************************************************/
+	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+	/*
+	 * Check SVE state is preserved across normal/secure world switches for
+	 * the discovered vector lengths.
+	 */
+	vl_bitmap = sve_probe_vl(SVE_VQ_ARCH_MAX);
+	for (bitmap = vl_bitmap; bitmap != 0U; bitmap >>= 1) {
+		if ((bitmap & 1) != 0) {
+			VERBOSE("Test VL %u bits.\n", SVE_VQ_TO_BITS(vq));
+
+			ret = func(vq);
+			if (ret != TEST_RESULT_SUCCESS) {
+				return ret;
+			}
+		}
+		vq++;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Tests that SVE vectors are preserved during the context switches between
+ * normal world and the secure world.
+ * Probe all possible vector lengths that the platform implements.
+ * For each vector length value:
+ * -Fill SVE vectors with known values.
+ * -Request SP to fill NEON vectors with different values.
+ * -Check the SVE context is restored on return.
+ */
+test_result_t test_sve_vectors_preserved(void)
+{
+	return helper_test_sve(test_sve_vectors_preserved_impl);
+}
+
 /*
  * Sends SIMD fill command to Cactus SP
  * Returns:
  *	false - On success
  *	true  - On failure
  */
-#ifdef __aarch64__
 static bool callback_enter_cactus_sp(void)
 {
 	struct ffa_value ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
@@ -158,7 +200,6 @@
 
 	return false;
 }
-#endif /* __aarch64__ */
 
 /*
  * Tests that SVE vector operations in normal world are not affected by context
@@ -174,7 +215,7 @@
 	/**********************************************************************
 	 * Verify that FF-A is there and that it has the correct version.
 	 **********************************************************************/
-	CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
 
 	val = 2 * SVE_TEST_ITERATIONS;
 
@@ -184,7 +225,7 @@
 	}
 
 	/* Set ZCR_EL2.LEN to implemented VL (constrained by EL3). */
-	write_zcr_el2(0xf);
+	write_zcr_el2(SVE_VQ_ARCH_MAX);
 	isb();
 
 	for (unsigned int i = 0; i < SVE_TEST_ITERATIONS; i++) {
@@ -209,3 +250,298 @@
 
 	return TEST_RESULT_SUCCESS;
 }
+
+/*
+ * This base function probes all possible Streaming SVE vector length
+ * values and calls the function passed as parameter with each discovered
+ * value.
+ */
+static test_result_t helper_test_sme(test_result_t (*func)(uint8_t svq))
+{
+	uint32_t svl_bitmap;
+	uint8_t svq;
+	test_result_t ret;
+
+	SKIP_TEST_IF_AARCH32();
+
+	/* Skip the test if SME is not supported. */
+	SKIP_TEST_IF_SME_NOT_SUPPORTED();
+
+	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+	svl_bitmap = sme_probe_svl(SME_SVQ_ARCH_MAX);
+	svq = 0;
+	for (uint32_t bitmap = svl_bitmap; bitmap != 0U; bitmap >>= 1) {
+		if ((bitmap & 1) != 0) {
+			VERBOSE("Test SVL %u bits.\n", SVE_VQ_TO_BITS(svq));
+			ret = func(svq);
+			if (ret != TEST_RESULT_SUCCESS) {
+				return ret;
+			}
+		}
+
+		svq++;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t do_test_sme_streaming_sve(uint8_t svq)
+{
+	struct ffa_value ret;
+
+	/* Enable SME FA64 if implemented. */
+	if (is_feat_sme_fa64_supported()) {
+		sme_enable_fa64();
+	}
+
+	/* Enter Streaming SVE mode. */
+	sme_smstart(SMSTART_SM);
+
+	/* Configure SVQ. */
+	sme_config_svq((uint32_t)svq);
+
+	sve_z_regs_write_rand(&sve_vectors_input);
+	sve_p_regs_write_rand(&sve_predicates_input);
+	fpu_cs_regs_write_rand(&g_fpu_state_write.cs_regs);
+
+	if (is_feat_sme_fa64_supported()) {
+		sve_ffr_regs_write_rand(&sve_ffr_input);
+	}
+
+	ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+
+	if (!is_expected_cactus_response(ret, CACTUS_SUCCESS, 0)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (is_feat_sme_fa64_supported()) {
+		if (!sme_feat_fa64_enabled()) {
+			ERROR("FA64 trap bit disabled, expected enabled.\n");
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Expect Streaming SVE to be active. */
+	if (!sme_smstat_sm()) {
+		ERROR("Streaming SVE disabled, expected enabled.\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	sve_z_regs_read(&sve_vectors_output);
+	if (sve_z_regs_compare(&sve_vectors_input, &sve_vectors_output) != 0) {
+		ERROR("SME Z vectors compare failed.");
+		return TEST_RESULT_FAIL;
+	}
+
+	sve_p_regs_read(&sve_predicates_output);
+	if (sve_p_regs_compare(&sve_predicates_input, &sve_predicates_output) != 0) {
+		ERROR("SME predicates compare failed.");
+		return TEST_RESULT_FAIL;
+	}
+
+	fpu_cs_regs_read(&g_fpu_state_read.cs_regs);
+	if (fpu_cs_regs_compare(&g_fpu_state_write.cs_regs, &g_fpu_state_read.cs_regs) != 0) {
+		ERROR("FPU control/status compare failed.");
+		return TEST_RESULT_FAIL;
+	}
+
+	if (is_feat_sme_fa64_supported()) {
+		sve_ffr_regs_read(&sve_ffr_output);
+		if (sve_ffr_regs_compare(&sve_ffr_input, &sve_ffr_output) != 0) {
+			ERROR("SVE FFR register compare failed.");
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Exit Streaming SVE mode. */
+	sme_smstop(SMSTOP_SM);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * SME enter SPMC with SSVE enabled.
+ *
+ * Check Streaming SVE is preserved on a normal/secure world switch.
+ *
+ */
+test_result_t test_sme_streaming_sve(void)
+{
+	return helper_test_sme(do_test_sme_streaming_sve);
+}
+
+static test_result_t do_test_sme_za(uint8_t vq)
+{
+	struct ffa_value ret;
+
+	/* Enable SME FA64 if implemented. */
+	if (is_feat_sme_fa64_supported()) {
+		sme_enable_fa64();
+	}
+
+	/* Enable SME ZA Array Storage */
+	sme_smstart(SMSTART_ZA);
+
+	/* Configure VQ. */
+	sve_config_vq(vq);
+
+	sve_z_regs_write_rand(&sve_vectors_input);
+	sve_p_regs_write_rand(&sve_predicates_input);
+	fpu_cs_regs_write_rand(&g_fpu_state_write.cs_regs);
+	sve_ffr_regs_write_rand(&sve_ffr_input);
+
+	ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+	if (!is_ffa_direct_response(ret)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (cactus_get_response(ret) == CACTUS_ERROR) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (is_feat_sme_fa64_supported()) {
+		if (!sme_feat_fa64_enabled()) {
+			ERROR("FA64 trap bit disabled, expected enabled.\n");
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Expect Streaming SVE to be inactive. */
+	if (sme_smstat_sm()) {
+		ERROR("Streaming SVE enabled, expected disabled.\n");
+		return TEST_RESULT_FAIL;
+	}
+
+
+	sve_z_regs_read(&sve_vectors_output);
+	if (sve_z_regs_compare(&sve_vectors_input, &sve_vectors_output) != 0) {
+		ERROR("SME Z vectors compare failed.");
+		return TEST_RESULT_FAIL;
+	}
+
+	sve_p_regs_read(&sve_predicates_output);
+	if (sve_p_regs_compare(&sve_predicates_input, &sve_predicates_output) != 0) {
+		ERROR("SME predicates compare failed.");
+		return TEST_RESULT_FAIL;
+	}
+
+	fpu_cs_regs_read(&g_fpu_state_read.cs_regs);
+	if (fpu_cs_regs_compare(&g_fpu_state_write.cs_regs, &g_fpu_state_read.cs_regs) != 0) {
+		ERROR("FPU control/status compare failed.");
+		return TEST_RESULT_FAIL;
+	}
+
+	sve_ffr_regs_read(&sve_ffr_output);
+	if (sve_ffr_regs_compare(&sve_ffr_input, &sve_ffr_output) != 0) {
+		ERROR("SVE FFR register compare failed.");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Disable SME ZA array storage. */
+	sme_smstop(SMSTOP_ZA);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * SME enter SPMC with ZA enabled.
+ *
+ * Check ZA array enabled is preserved on a normal/secure world switch.
+ */
+test_result_t test_sme_za(void)
+{
+	SKIP_TEST_IF_AARCH32();
+
+	/* Skip the test if SME is not supported. */
+	SKIP_TEST_IF_SME_NOT_SUPPORTED();
+
+	return helper_test_sve(do_test_sme_za);
+}
+
+static test_result_t do_test_sme_streaming_sve_za(uint8_t svq)
+{
+	struct ffa_value ret;
+
+	/* Enable SME FA64 if implemented. */
+	if (is_feat_sme_fa64_supported()) {
+		sme_enable_fa64();
+	}
+
+	/* Enable SME SSVE + ZA. */
+	sme_smstart(SMSTART);
+
+	/* Configure SVQ. */
+	sme_config_svq((uint32_t)svq);
+
+	sve_z_regs_write_rand(&sve_vectors_input);
+	sve_p_regs_write_rand(&sve_predicates_input);
+	fpu_cs_regs_write_rand(&g_fpu_state_write.cs_regs);
+
+	if (is_feat_sme_fa64_supported()) {
+		sve_ffr_regs_write_rand(&sve_ffr_input);
+	}
+
+	ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+	if (!is_ffa_direct_response(ret)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (cactus_get_response(ret) == CACTUS_ERROR) {
+		return TEST_RESULT_FAIL;
+	}
+
+	if (is_feat_sme_fa64_supported()) {
+		if (!sme_feat_fa64_enabled()) {
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Expect Streaming SVE to be active. */
+	if (!sme_smstat_sm()) {
+		ERROR("Streaming SVE disabled, expected enabled.\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	sve_z_regs_read(&sve_vectors_output);
+	if (sve_z_regs_compare(&sve_vectors_input, &sve_vectors_output) != 0) {
+		ERROR("SME Z vectors compare failed.");
+		return TEST_RESULT_FAIL;
+	}
+
+	sve_p_regs_read(&sve_predicates_output);
+	if (sve_p_regs_compare(&sve_predicates_input, &sve_predicates_output) != 0) {
+		ERROR("SME predicates compare failed.");
+		return TEST_RESULT_FAIL;
+	}
+
+	fpu_cs_regs_read(&g_fpu_state_read.cs_regs);
+	if (fpu_cs_regs_compare(&g_fpu_state_write.cs_regs, &g_fpu_state_read.cs_regs) != 0) {
+		ERROR("FPU control/status compare failed.");
+		return TEST_RESULT_FAIL;
+	}
+
+	if (is_feat_sme_fa64_supported()) {
+		sve_ffr_regs_read(&sve_ffr_output);
+		if (sve_ffr_regs_compare(&sve_ffr_input, &sve_ffr_output) != 0) {
+			ERROR("SVE FFR register compare failed.");
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Disable SSVE + ZA. */
+	sme_smstop(SMSTOP);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * SME enter SPMC with SSVE+ZA enabled.
+ *
+ * Check Streaming SVE and ZA array enabled are preserved on a
+ * normal/secure world switch.
+ */
+test_result_t test_sme_streaming_sve_za(void)
+{
+	return helper_test_sme(do_test_sme_streaming_sve_za);
+}
diff --git a/tftf/tests/tests-cpu-extensions.mk b/tftf/tests/tests-cpu-extensions.mk
index b0af1a3..144694e 100644
--- a/tftf/tests/tests-cpu-extensions.mk
+++ b/tftf/tests/tests-cpu-extensions.mk
@@ -8,12 +8,15 @@
 	extensions/afp/test_afp.c					\
 	extensions/amu/test_amu.c					\
 	extensions/brbe/test_brbe.c					\
+	extensions/debugv8p9/test_debugv8p9.c				\
 	extensions/ecv/test_ecv.c					\
 	extensions/fgt/test_fgt.c					\
-	extensions/pmuv3/test_pmuv3.c					\
+	extensions/ls64/test_ls64.c 					\
+	extensions/ls64/ls64_operations.S				\
 	extensions/mpam/test_mpam.c					\
 	extensions/mte/test_mte.c					\
 	extensions/pauth/test_pauth.c					\
+	extensions/pmuv3/test_pmuv3.c					\
 	extensions/sme/test_sme.c					\
 	extensions/sme/test_sme2.c					\
 	extensions/spe/test_spe.c					\
diff --git a/tftf/tests/tests-cpu-extensions.xml b/tftf/tests/tests-cpu-extensions.xml
index 3b93344..62bb9ea 100644
--- a/tftf/tests/tests-cpu-extensions.xml
+++ b/tftf/tests/tests-cpu-extensions.xml
@@ -11,7 +11,9 @@
   <testsuite name="CPU extensions" description="Various CPU extensions tests">
     <testcase name="AMUv1 valid counter values" function="test_amu_valid_ctr" />
     <testcase name="AMUv1 suspend/resume" function="test_amu_suspend_resume" />
+    <testcase name="AMUv1 group 1 RAZ" function="test_amu_group1_raz" />
     <testcase name="SVE support" function="test_sve_support" />
+    <testcase name="Debugv8p9 support" function="test_debugv8p9_enabled" />
     <testcase name="Access Pointer Authentication Registers" function="test_pauth_reg_access" />
     <testcase name="Use Pointer Authentication Instructions" function="test_pauth_instructions" />
     <testcase name="Check for Pointer Authentication key leakage from EL3" function="test_pauth_leakage" />
@@ -20,6 +22,7 @@
     <testcase name="Use MTE Instructions" function="test_mte_instructions" />
     <testcase name="Check for MTE register leakage" function="test_mte_leakage" />
     <testcase name="Use FGT Registers" function="test_fgt_enabled" />
+    <testcase name="Use FGT2 Registers" function="test_fgt2_enabled" />
     <testcase name="Use ECV Registers" function="test_ecv_enabled" />
     <testcase name="Use trace buffer control Registers" function="test_trbe_enabled" />
     <testcase name="Use branch record buffer control registers" function="test_brbe_enabled" />
@@ -34,6 +37,7 @@
     <testcase name="PMUv3 cycle counter functional in NS" function="test_pmuv3_cycle_works_ns" />
     <testcase name="PMUv3 event counter functional in NS" function="test_pmuv3_event_works_ns" />
     <testcase name="PMUv3 SMC counter preservation" function="test_pmuv3_el3_preserves" />
+    <testcase name="LS64 support" function="test_ls64_instructions" />
   </testsuite>
 
   <testsuite name="ARM_ARCH_SVC" description="Arm Architecture Service tests">
diff --git a/tftf/tests/tests-spm.xml b/tftf/tests/tests-spm.xml
index 5658d62..7c5a8c3 100644
--- a/tftf/tests/tests-spm.xml
+++ b/tftf/tests/tests-spm.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 
 <!--
-  Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+  Copyright (c) 2018-2024, Arm Limited. All rights reserved.
 
   SPDX-License-Identifier: BSD-3-Clause
 -->
@@ -143,14 +143,20 @@
                function="test_ffa_memory_retrieve_request_from_vm" />
   </testsuite>
 
-  <testsuite name="SIMD,SVE Registers context"
+  <testsuite name="SIMD context switch tests"
              description="Validate context switch between NWd and SWd" >
-     <testcase name="Check that SIMD registers context is preserved"
+     <testcase name="Check that Adv. SIMD registers context is preserved"
                function="test_simd_vectors_preserved" />
      <testcase name="Check that SVE registers context is preserved"
                function="test_sve_vectors_preserved" />
      <testcase name="Check that SVE operations in NWd are unaffected by SWd"
                function="test_sve_vectors_operations" />
+     <testcase name="Enter SPMC with SME SSVE enabled"
+               function="test_sme_streaming_sve" />
+     <testcase name="Enter SPMC with SME ZA enabled"
+               function="test_sme_za" />
+     <testcase name="Enter SPMC with SME SM+ZA enabled"
+               function="test_sme_streaming_sve_za" />
   </testsuite>
 
    <testsuite name="FF-A Interrupt"