feat(amu): test AMU counter restriction (RAZ)
When using AMU counters, there is a risk of exposing information to
lower exception levels. To prevent this, the counters can be
restricted (by setting AMCR.CG1RZ at EL3) so that they read as zero
(RAZ) at lower ELs. This test verifies that the group 1 counters
still read as zero after forcing counting activity with instructions
that trigger MPMM "gear shifting" (e.g. SVE instructions).

Note: This test applies to TC2 only, as it is currently the only
platform that supports MPMM.
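
At its core, the test reads each group 1 counter before and after the
forced activity and expects both reads to stay at zero. A minimal
sketch of that check (simplified; it assumes the amu_group1_* helpers
added by this patch, and force_amu_activity() is a hypothetical
stand-in for the FP/SVE instruction loop used by the real test):

    /* With AMCR.CG1RZ set by EL3, group 1 counters must stay RAZ. */
    for (unsigned int i = 0U; i < amu_group1_num_counters(); i++) {
        uint64_t before = amu_group1_cnt_read(i);

        force_amu_activity();   /* hypothetical FP/SVE busy loop */

        uint64_t after = amu_group1_cnt_read(i);
        if ((before != 0ULL) || (after != before)) {
            return TEST_RESULT_FAIL;
        }
    }
    return TEST_RESULT_SUCCESS;
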
Signed-off-by: Juan Pablo Conde <juanpablo.conde@arm.com>
Change-Id: Ic32ba19fa489cf479947d4467ddb84e6abd1b454
diff --git a/Makefile b/Makefile
index f88cd3f..b848c23 100644
--- a/Makefile
+++ b/Makefile
@@ -196,6 +196,7 @@
$(eval $(call add_define,TFTF_DEFINES,USE_NVM))
$(eval $(call add_define,TFTF_DEFINES,ENABLE_REALM_PAYLOAD_TESTS))
$(eval $(call add_define,TFTF_DEFINES,TRANSFER_LIST))
+$(eval $(call add_define,TFTF_DEFINES,PLAT_AMU_GROUP1_COUNTERS_MASK))
################################################################################
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
index d5950ca..3ce053d 100644
--- a/include/lib/extensions/amu.h
+++ b/include/lib/extensions/amu.h
@@ -71,7 +71,11 @@
#if AMU_GROUP1_NR_COUNTERS
uint64_t amu_group1_cnt_read(unsigned int idx);
+uint64_t amu_group1_num_counters(void);
+uint64_t amu_group1_evtype_read(unsigned int idx);
+void amu_group1_evtype_write(unsigned int idx, uint64_t val);
#if __aarch64__
+uint64_t amu_group1_is_counter_implemented(unsigned int idx);
uint64_t amu_group1_voffset_read(unsigned int idx);
void amu_group1_voffset_write(unsigned int idx, uint64_t val);
#endif
diff --git a/include/lib/extensions/amu_private.h b/include/lib/extensions/amu_private.h
index 7ae17d9..b98178f 100644
--- a/include/lib/extensions/amu_private.h
+++ b/include/lib/extensions/amu_private.h
@@ -11,6 +11,10 @@
uint64_t amu_group0_cnt_read_internal(unsigned int idx);
uint64_t amu_group1_cnt_read_internal(unsigned int idx);
+uint64_t amu_group1_num_counters_internal(void);
+uint64_t amu_group1_is_cnt_impl_internal(unsigned int idx);
+void amu_group1_evtype_write_internal(unsigned int idx, uint64_t val);
+uint64_t amu_group1_evtype_read_internal(unsigned int idx);
#if __aarch64__
uint64_t amu_group0_voffset_read_internal(unsigned int idx);
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 0a1e653..491edde 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -75,11 +75,58 @@
uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
- assert(idx < AMU_GROUP1_NR_COUNTERS);
+ assert(amu_group1_supported());
+ assert(idx < amu_group1_num_counters());
return amu_group1_cnt_read_internal(idx);
}
+/* Return the number of group 1 counters, capped at AMU_GROUP1_NR_COUNTERS. */
+uint64_t amu_group1_num_counters(void)
+{
+ assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+ assert(amu_group1_supported());
+
+ uint64_t num_counters = amu_group1_num_counters_internal();
+ if (num_counters < AMU_GROUP1_NR_COUNTERS) {
+ return num_counters;
+ }
+ return AMU_GROUP1_NR_COUNTERS;
+}
+
+/* Return the event type for the group 1 counter with index `idx`. */
+uint64_t amu_group1_evtype_read(unsigned int idx)
+{
+ assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+ assert(amu_group1_supported());
+ assert(idx < amu_group1_num_counters());
+
+ return amu_group1_evtype_read_internal(idx);
+}
+
+/* Set the event type for the group 1 counter with index `idx`. */
+void amu_group1_evtype_write(unsigned int idx, uint64_t val)
+{
+ assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+ assert(amu_group1_supported());
+ assert(idx < amu_group1_num_counters());
+
+ amu_group1_evtype_write_internal(idx, val);
+}
+
+/*
+ * Return whether group 1 counter at index `idx` is implemented.
+ *
+ * Using this function requires Armv8.6 FEAT_AMUv1p1 support.
+ */
+uint64_t amu_group1_is_counter_implemented(unsigned int idx)
+{
+ assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+ assert(amu_group1_supported());
+
+ return amu_group1_is_cnt_impl_internal(idx);
+}
+
/*
* Read the group 1 offset register for a given index.
*
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
index b15daf3..1f2ae5c 100644
--- a/lib/extensions/amu/aarch64/amu_helpers.S
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -10,6 +10,10 @@
.globl amu_group0_cnt_read_internal
.globl amu_group1_cnt_read_internal
+ .globl amu_group1_evtype_write_internal
+ .globl amu_group1_evtype_read_internal
+ .globl amu_group1_num_counters_internal
+ .globl amu_group1_is_cnt_impl_internal
/* FEAT_AMUv1p1 virtualisation offset register functions */
.globl amu_group0_voffset_read_internal
@@ -258,3 +262,127 @@
write AMEVCNTVOFF1E_EL2 /* index 14 */
write AMEVCNTVOFF1F_EL2 /* index 15 */
endfunc amu_group1_voffset_write_internal
+
+/*
+ * uint64_t amu_group1_evtype_read_internal(unsigned int idx);
+ *
+ * Given `idx`, read the corresponding AMU event type register
+ * and return it in `x0`.
+ */
+func amu_group1_evtype_read_internal
+ adr x1, 1f
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ tst x0, #~0xF
+ ASM_ASSERT(eq)
+#endif
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ add x1, x1, x0, lsl #3 /* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x1, x1, x0, lsl #2 /* + "bti j" instruction */
+#endif
+ br x1
+
+1: read AMEVTYPER10_EL0 /* index 0 */
+ read AMEVTYPER11_EL0 /* index 1 */
+ read AMEVTYPER12_EL0 /* index 2 */
+ read AMEVTYPER13_EL0 /* index 3 */
+ read AMEVTYPER14_EL0 /* index 4 */
+ read AMEVTYPER15_EL0 /* index 5 */
+ read AMEVTYPER16_EL0 /* index 6 */
+ read AMEVTYPER17_EL0 /* index 7 */
+ read AMEVTYPER18_EL0 /* index 8 */
+ read AMEVTYPER19_EL0 /* index 9 */
+ read AMEVTYPER1A_EL0 /* index 10 */
+ read AMEVTYPER1B_EL0 /* index 11 */
+ read AMEVTYPER1C_EL0 /* index 12 */
+ read AMEVTYPER1D_EL0 /* index 13 */
+ read AMEVTYPER1E_EL0 /* index 14 */
+ read AMEVTYPER1F_EL0 /* index 15 */
+endfunc amu_group1_evtype_read_internal
+
+/*
+ * void amu_group1_evtype_write_internal(unsigned int idx, uint64_t val);
+ *
+ * Program the AMU event type register indexed by `idx`
+ * with the value `val`.
+ */
+func amu_group1_evtype_write_internal
+ adr x2, 1f
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ tst x0, #~0xF
+ ASM_ASSERT(eq)
+
+ /* `val` should be in the range [0, 65535] */
+ tst x1, #~0xFFFF
+ ASM_ASSERT(eq)
+#endif
+ /*
+ * Given `idx` calculate address of msr/ret instruction pair
+ * in the table below.
+ */
+ add x2, x2, x0, lsl #3 /* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x2, x2, x0, lsl #2 /* + "bti j" instruction */
+#endif
+ br x2
+
+1: write AMEVTYPER10_EL0 /* index 0 */
+ write AMEVTYPER11_EL0 /* index 1 */
+ write AMEVTYPER12_EL0 /* index 2 */
+ write AMEVTYPER13_EL0 /* index 3 */
+ write AMEVTYPER14_EL0 /* index 4 */
+ write AMEVTYPER15_EL0 /* index 5 */
+ write AMEVTYPER16_EL0 /* index 6 */
+ write AMEVTYPER17_EL0 /* index 7 */
+ write AMEVTYPER18_EL0 /* index 8 */
+ write AMEVTYPER19_EL0 /* index 9 */
+ write AMEVTYPER1A_EL0 /* index 10 */
+ write AMEVTYPER1B_EL0 /* index 11 */
+ write AMEVTYPER1C_EL0 /* index 12 */
+ write AMEVTYPER1D_EL0 /* index 13 */
+ write AMEVTYPER1E_EL0 /* index 14 */
+ write AMEVTYPER1F_EL0 /* index 15 */
+endfunc amu_group1_evtype_write_internal
+
+/*
+ * uint64_t amu_group1_num_counters_internal(void);
+ *
+ * Return the number of group 1 counters implemented, as reported by
+ * AMCGCR_EL0.CG1NC.
+ */
+func amu_group1_num_counters_internal
+ mrs x0, AMCGCR_EL0
+ ubfx x0, x0, AMCGCR_EL0_CG1NC_SHIFT, AMCGCR_EL0_CG1NC_LENGTH
+ ret
+endfunc amu_group1_num_counters_internal
+
+/*
+ * uint64_t amu_group1_is_cnt_impl_internal(unsigned int idx);
+ *
+ * Given `idx`, return a non-zero value if group 1 counter `idx` is
+ * implemented (as reported by AMCG1IDR_EL0), and zero otherwise.
+ */
+func amu_group1_is_cnt_impl_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ tst x0, #~0xF
+ ASM_ASSERT(eq)
+#endif
+ mrs x1, AMCG1IDR_EL0
+ mov x2, #1
+ lsl x0, x2, x0
+ and x0, x1, x0
+ ret
+endfunc amu_group1_is_cnt_impl_internal
diff --git a/plat/arm/tc/platform.mk b/plat/arm/tc/platform.mk
index cec047c..14db89b 100644
--- a/plat/arm/tc/platform.mk
+++ b/plat/arm/tc/platform.mk
@@ -17,6 +17,8 @@
$(eval $(call add_define,NS_BL1U_DEFINES,TC_MAX_PE_PER_CPU))
$(eval $(call add_define,NS_BL2U_DEFINES,TC_MAX_PE_PER_CPU))
+$(eval $(call add_define,TFTF_DEFINES,TARGET_PLATFORM))
+
PLAT_INCLUDES += -Iplat/arm/tc/include/
PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
diff --git a/tftf/tests/extensions/amu/test_amu.c b/tftf/tests/extensions/amu/test_amu.c
index 8d5c92b..21305a7 100644
--- a/tftf/tests/extensions/amu/test_amu.c
+++ b/tftf/tests/extensions/amu/test_amu.c
@@ -16,7 +16,8 @@
#include <tftf_lib.h>
#include <timer.h>
-#define SUSPEND_TIME_1_SEC 1000
+#define SUSPEND_TIME_1_SEC 1000
+#define MAX_MPMM_TEST_ITERATIONS 100000U
static volatile int wakeup_irq_received[PLATFORM_CORE_COUNT];
@@ -242,3 +243,53 @@
return TEST_RESULT_SUCCESS;
}
+
+/*
+ * Check that group 1 counters read as 0 at ELs lower than EL3 when
+ * AMCR.CG1RZ is set.
+ */
+test_result_t test_amu_group1_raz(void)
+{
+/* Test on TC2 only, as MPMM is not implemented on other platforms yet. */
+#if PLAT_tc && (TARGET_PLATFORM == 2)
+ uint64_t counters_initial[AMU_GROUP1_NR_COUNTERS] = {0};
+ uint64_t counters_final[AMU_GROUP1_NR_COUNTERS] = {0};
+
+ for (unsigned int i = 0; i < amu_group1_num_counters(); i++) {
+ INFO("AMUEVTYPER1%x: 0x%llx\n", i, amu_group1_evtype_read(i));
+ counters_initial[i] = amu_group1_cnt_read(i);
+ }
+
+ for (unsigned int i = 0U; i < MAX_MPMM_TEST_ITERATIONS; i++) {
+ /* Instructions with activity count 1 */
+ __asm__ volatile("fmov d0, xzr");
+ __asm__ volatile("fmov d1, xzr");
+ __asm__ volatile("fmul d2, d0, d1");
+ __asm__ volatile("fmov d2, xzr");
+
+ __asm__ volatile("fmov d0, xzr");
+ __asm__ volatile("fmov d1, xzr");
+ __asm__ volatile("fmov d2, xzr");
+ __asm__ volatile("fmadd d3, d2, d1, d0");
+
+ /* Instructions with activity count 2 */
+ __asm__ volatile("ptrue p0.s, ALL");
+ __asm__ volatile("index z10.s, #10, #13");
+ __asm__ volatile("index z11.s, #12, #7");
+ __asm__ volatile("ucvtf v10.4s, v10.4s");
+ __asm__ volatile("ucvtf v11.4s, v11.4s");
+ __asm__ volatile("fadd v0.4s, v10.4s, v11.4s");
+ }
+
+ for (unsigned int i = 0; i < amu_group1_num_counters(); i++) {
+ counters_final[i] = amu_group1_cnt_read(i);
+ if (counters_final[i] != counters_initial[i]) {
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+#else
+ return TEST_RESULT_SKIPPED;
+#endif /* PLAT_tc && (TARGET_PLATFORM == 2) */
+}
diff --git a/tftf/tests/tests-cpu-extensions.xml b/tftf/tests/tests-cpu-extensions.xml
index 3b93344..bd6abdd 100644
--- a/tftf/tests/tests-cpu-extensions.xml
+++ b/tftf/tests/tests-cpu-extensions.xml
@@ -11,6 +11,7 @@
<testsuite name="CPU extensions" description="Various CPU extensions tests">
<testcase name="AMUv1 valid counter values" function="test_amu_valid_ctr" />
<testcase name="AMUv1 suspend/resume" function="test_amu_suspend_resume" />
+ <testcase name="AMUv1 group 1 RAZ" function="test_amu_group1_raz" />
<testcase name="SVE support" function="test_sve_support" />
<testcase name="Access Pointer Authentication Registers" function="test_pauth_reg_access" />
<testcase name="Use Pointer Authentication Instructions" function="test_pauth_instructions" />