feat(amu): test AMU counter restriction (RAZ)

When using AMU counters, there is a risk of exposing information to
lower exception levels. In order to prevent this, counters are
restricted, so they are read as zero (RAZ) at a lower EL. This
test verifies that counters are read as zero after forcing counting
through instructions that trigger MPMM "gear shifting" (e.g. by
executing SVE instructions).

Note: This test applies to TC2 only, as it is the only platform that
      supports MPMM currently.

Signed-off-by: Juan Pablo Conde <juanpablo.conde@arm.com>
Change-Id: Ic32ba19fa489cf479947d4467ddb84e6abd1b454
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 0a1e653..491edde 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -75,11 +75,58 @@
 uint64_t amu_group1_cnt_read(unsigned int idx)
 {
 	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
-	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_group1_supported());
+	assert(idx < amu_group1_num_counters());
 
 	return amu_group1_cnt_read_internal(idx);
 }
 
+/* Return the number of counters available for group 1 */
+uint64_t amu_group1_num_counters(void)
+{
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+	assert(amu_group1_supported());
+
+	uint64_t num_counters = amu_group1_num_counters_internal();
+	if (num_counters < AMU_GROUP1_NR_COUNTERS) {
+		return num_counters;
+	}
+	return AMU_GROUP1_NR_COUNTERS;
+}
+
+/* Return the type for group 1 counter with index `idx`. */
+uint64_t amu_group1_evtype_read(unsigned int idx)
+{
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+	assert(amu_group1_supported());
+	assert(idx < amu_group1_num_counters());
+
+	return amu_group1_evtype_read_internal(idx);
+}
+
+/* Set the type for group 1 counter with index `idx`. */
+void amu_group1_evtype_write(unsigned int idx, uint64_t val)
+{
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
+	assert(amu_group1_supported());
+	assert(idx < amu_group1_num_counters());
+
+	amu_group1_evtype_write_internal(idx, val);
+}
+
+/*
+ * Return whether group 1 counter at index `idx` is implemented.
+ *
+ * Using this function requires v8.6 FEAT_AMUv1p1 support.
+ */
+uint64_t amu_group1_is_counter_implemented(unsigned int idx)
+{
+	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(amu_group1_supported());
+
+	return amu_group1_is_cnt_impl_internal(idx);
+}
+
 /*
  * Read the group 1 offset register for a given index.
  *
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
index b15daf3..1f2ae5c 100644
--- a/lib/extensions/amu/aarch64/amu_helpers.S
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -10,6 +10,10 @@
 
 	.globl	amu_group0_cnt_read_internal
 	.globl	amu_group1_cnt_read_internal
+	.globl	amu_group1_evtype_write_internal
+	.globl	amu_group1_evtype_read_internal
+	.globl	amu_group1_num_counters_internal
+	.globl	amu_group1_is_cnt_impl_internal
 
 	/* FEAT_AMUv1p1 virtualisation offset register functions */
 	.globl	amu_group0_voffset_read_internal
@@ -258,3 +262,127 @@
 	write	AMEVCNTVOFF1E_EL2	/* index 14 */
 	write	AMEVCNTVOFF1F_EL2	/* index 15 */
 endfunc amu_group1_voffset_write_internal
+
+/*
+ * uint64_t amu_group1_evtype_read_internal(unsigned int idx);
+ *
+ * Given `idx`, read the corresponding AMU event type register
+ * and return it in `x0`.
+ */
+func amu_group1_evtype_read_internal
+	adr	x1, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index.  Ensure `idx` is valid.
+	 */
+	tst	x0, #~0xF
+	ASM_ASSERT(eq)
+#endif
+	/*
+	 * Given `idx` calculate address of mrs/ret instruction pair
+	 * in the table below.
+	 */
+	add	x1, x1, x0, lsl #3	/* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x1, x1, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x1
+
+1:	read	AMEVTYPER10_EL0	/* index 0 */
+	read	AMEVTYPER11_EL0	/* index 1 */
+	read	AMEVTYPER12_EL0	/* index 2 */
+	read	AMEVTYPER13_EL0	/* index 3 */
+	read	AMEVTYPER14_EL0	/* index 4 */
+	read	AMEVTYPER15_EL0	/* index 5 */
+	read	AMEVTYPER16_EL0	/* index 6 */
+	read	AMEVTYPER17_EL0	/* index 7 */
+	read	AMEVTYPER18_EL0	/* index 8 */
+	read	AMEVTYPER19_EL0	/* index 9 */
+	read	AMEVTYPER1A_EL0	/* index 10 */
+	read	AMEVTYPER1B_EL0	/* index 11 */
+	read	AMEVTYPER1C_EL0	/* index 12 */
+	read	AMEVTYPER1D_EL0	/* index 13 */
+	read	AMEVTYPER1E_EL0	/* index 14 */
+	read	AMEVTYPER1F_EL0	/* index 15 */
+endfunc amu_group1_evtype_read_internal
+
+/*
+ * void amu_group1_evtype_write_internal(unsigned int idx, uint64_t val);
+ *
+ * Program the AMU event type register indexed by `idx`
+ * with the value `val`.
+ */
+func amu_group1_evtype_write_internal
+	adr	x2, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index.  Ensure `idx` is valid.
+	 */
+	tst	x0, #~0xF
+	ASM_ASSERT(eq)
+
+	/* val should be between [0, 65535] */
+	tst	x1, #~0xFFFF
+	ASM_ASSERT(eq)
+#endif
+	/*
+	 * Given `idx` calculate address of msr/ret instruction pair
+	 * in the table below.
+	 */
+	add	x2, x2, x0, lsl #3	/* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x2, x2, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x2
+
+1:	write	AMEVTYPER10_EL0		/* index 0 */
+	write	AMEVTYPER11_EL0		/* index 1 */
+	write	AMEVTYPER12_EL0		/* index 2 */
+	write	AMEVTYPER13_EL0		/* index 3 */
+	write	AMEVTYPER14_EL0		/* index 4 */
+	write	AMEVTYPER15_EL0		/* index 5 */
+	write	AMEVTYPER16_EL0		/* index 6 */
+	write	AMEVTYPER17_EL0		/* index 7 */
+	write	AMEVTYPER18_EL0		/* index 8 */
+	write	AMEVTYPER19_EL0		/* index 9 */
+	write	AMEVTYPER1A_EL0		/* index 10 */
+	write	AMEVTYPER1B_EL0		/* index 11 */
+	write	AMEVTYPER1C_EL0		/* index 12 */
+	write	AMEVTYPER1D_EL0		/* index 13 */
+	write	AMEVTYPER1E_EL0		/* index 14 */
+	write	AMEVTYPER1F_EL0		/* index 15 */
+endfunc amu_group1_evtype_write_internal
+
+/*
+ * uint64_t amu_group1_num_counters_internal(void);
+ *
+ * Return the number of counters implemented for group 1.
+ */
+func amu_group1_num_counters_internal
+	mrs	x0, AMCGCR_EL0
+	ubfx	x0, x0, AMCGCR_EL0_CG1NC_SHIFT, AMCGCR_EL0_CG1NC_LENGTH
+	ret
+endfunc amu_group1_num_counters_internal
+
+/*
+ * uint64_t amu_group1_is_cnt_impl_internal(unsigned int idx);
+ *
+ * Given `idx`, return whether counter `idx` is implemented or not.
+ */
+func amu_group1_is_cnt_impl_internal
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index.  Ensure `idx` is valid.
+	 */
+	tst	x0, #~0xF
+	ASM_ASSERT(eq)
+#endif
+	mrs	x1, AMCG1IDR_EL0
+	mov	x2, #1
+	lsl	x0, x2, x0
+	and	x0, x1, x0
+	ret
+endfunc amu_group1_is_cnt_impl_internal