author     Petre-Ionut Tudor <petre-ionut.tudor@arm.com>	2019-09-18 16:13:00 +0100
committer  Petre-Ionut Tudor <petre-ionut.tudor@arm.com>	2019-10-04 09:35:00 +0100
commit     f68ebdb9b45cc7a58f816f153f5e626c898dc0cf (patch)
tree       8eb48a5843eb4acfa86c3066173aaa2b0f898d4b
parent     0012dbc2a841abeff25be8be1113f19073ee4d2c (diff)
Try to leak counter values from secure world.
This patch introduces a series of tests that try to leak PMU counter
values from EL3 and S_EL1.

PMU events used:
 - CPU cycles via PMU counter PMCCNTR_EL0
 - Retired writes to PC via PMU counter PMEVCNTR0_EL0

This AARCH64-specific patch is for security fix:
https://review.trustedfirmware.org/c/TF-A/trusted-firmware-a/+/1789

The AARCH32 versions of these tests will be in a future patch.

Signed-off-by: Petre-Ionut Tudor <petre-ionut.tudor@arm.com>
Change-Id: Ib27948edadde30272e59a9ab208543703fa078bd
-rw-r--r--  include/common/test_helpers.h             |  14
-rw-r--r--  include/lib/aarch64/arch.h                |  34
-rw-r--r--  include/lib/aarch64/arch_helpers.h        |   6
-rw-r--r--  tftf/tests/misc_tests/test_pmu_leakage.c  | 378
-rw-r--r--  tftf/tests/tests-extensive.xml            |   2
-rw-r--r--  tftf/tests/tests-pmu-leakage.mk           |   9
-rw-r--r--  tftf/tests/tests-pmu-leakage.xml          |  16
-rw-r--r--  tftf/tests/tests-standard.mk              |   3
-rw-r--r--  tftf/tests/tests-standard.xml             |   4
9 files changed, 464 insertions(+), 2 deletions(-)
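
At a glance, each test added below takes a baseline PMU event count around an SMC that EL3 rejects immediately, then profiles the same event around the SMC under test, and fails if the average over 1000 iterations rises more than about 10% above that baseline. A condensed sketch of the verdict logic, reusing names from test_pmu_leakage.c (the wrapper function itself is illustrative and not part of the patch):

	/*
	 * Condensed sketch of the detection logic used by the tests below.
	 * measure_event(), profile_invalid_smc() and ALLOWED_DEVIATION are
	 * from test_pmu_leakage.c; check_no_leakage() is illustrative only.
	 */
	static test_result_t check_no_leakage(u_register_t (*read_cntr)(void),
		unsigned long long (*profile)(u_register_t (*)(void)))
	{
		struct pmu_event_info baseline, profiled;

		/* Baseline: event count around an SMC EL3 rejects immediately. */
		measure_event(read_cntr, profile_invalid_smc, &baseline);

		/* Measurement: the same event around the SMC under test. */
		measure_event(read_cntr, profile, &profiled);

		/* Fail if secure-world activity is visible above baseline noise. */
		if (profiled.avg - baseline.avg > baseline.avg / ALLOWED_DEVIATION)
			return TEST_RESULT_FAIL;
		return TEST_RESULT_SUCCESS;
	}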
diff --git a/include/common/test_helpers.h b/include/common/test_helpers.h
index 2cf6e83d2..141f00c79 100644
--- a/include/common/test_helpers.h
+++ b/include/common/test_helpers.h
@@ -155,6 +155,20 @@ typedef test_result_t (*test_function_arg_t)(void *arg);
} \
} while (0)
+#define SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(version) \
+ do { \
+ uint32_t debug_ver = read_id_aa64dfr0_el1() & \
+ (ID_AA64DFR0_DEBUG_MASK << ID_AA64DFR0_DEBUG_SHIFT); \
+ \
+ if ((debug_ver >> ID_AA64DFR0_DEBUG_SHIFT) < version) { \
+ tftf_testcase_printf("Debug version returned %d\n" \
+ "The required version is %d\n", \
+ debug_ver >> ID_AA64DFR0_DEBUG_SHIFT,\
+ version); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
/* Helper macro to verify if system suspend API is supported */
#define is_psci_sys_susp_supported() \
(tftf_get_psci_feature_info(SMC_PSCI_SYSTEM_SUSPEND) \
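
The new skip macro is intended to be the first statement of each PMU test, before any counters are programmed, exactly as the tests added further down use it. A minimal usage sketch (the test function name is hypothetical):

	test_result_t example_pmu_test(void)
	{
		/* Skip unless at least Armv8.2 Debug is implemented. */
		SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(
			ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED);

		/* ... PMU setup and measurement would go here ... */
		return TEST_RESULT_SUCCESS;
	}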
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index c839d1a6e..a62f13ce9 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -148,6 +148,15 @@
#define ID_AA64DFR0_PMS_LENGTH U(4)
#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+/* ID_AA64DFR0_EL1.DEBUG definitions */
+#define ID_AA64DFR0_DEBUG_SHIFT U(0)
+#define ID_AA64DFR0_DEBUG_LENGTH U(4)
+#define ID_AA64DFR0_DEBUG_MASK ULL(0xf)
+#define ID_AA64DFR0_V8_DEBUG_ARCH_SUPPORTED U(6)
+#define ID_AA64DFR0_V8_DEBUG_ARCH_VHE_SUPPORTED U(7)
+#define ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED U(8)
+#define ID_AA64DFR0_V8_4_DEBUG_ARCH_SUPPORTED U(9)
+
#define EL_IMPL_NONE ULL(0)
#define EL_IMPL_A64ONLY ULL(1)
#define EL_IMPL_A64_A32 ULL(2)
@@ -659,6 +668,31 @@
#define PMCR_EL0_DP_BIT (U(1) << 5)
#define PMCR_EL0_X_BIT (U(1) << 4)
#define PMCR_EL0_D_BIT (U(1) << 3)
+#define PMCR_EL0_E_BIT (U(1) << 0)
+
+/* PMCNTENSET_EL0 definitions */
+#define PMCNTENSET_EL0_C_BIT (U(1) << 31)
+#define PMCNTENSET_EL0_P_BIT(x) (U(1) << x)
+
+/* PMEVTYPER<n>_EL0 definitions */
+#define PMEVTYPER_EL0_P_BIT (U(1) << 31)
+#define PMEVTYPER_EL0_NSK_BIT (U(1) << 29)
+#define PMEVTYPER_EL0_NSH_BIT (U(1) << 27)
+#define PMEVTYPER_EL0_M_BIT (U(1) << 26)
+#define PMEVTYPER_EL0_MT_BIT (U(1) << 25)
+#define PMEVTYPER_EL0_SH_BIT (U(1) << 24)
+#define PMEVTYPER_EL0_EVTCOUNT_BITS U(0x000003FF)
+
+/* PMCCFILTR_EL0 definitions */
+#define PMCCFILTR_EL0_P_BIT (U(1) << 31)
+#define PMCCFILTR_EL0_NSK_BIT (U(1) << 29)
+#define PMCCFILTR_EL0_NSH_BIT (U(1) << 27)
+#define PMCCFILTR_EL0_M_BIT (U(1) << 26)
+#define PMCCFILTR_EL0_MT_BIT (U(1) << 25)
+#define PMCCFILTR_EL0_SH_BIT (U(1) << 24)
+
+/* PMU event counter ID definitions */
+#define PMU_EV_PC_WRITE_RETIRED U(0x000C)
/*******************************************************************************
* Definitions for system register interface to SVE
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 151e5be94..9d1ebb8d2 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -395,6 +395,12 @@ DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
DEFINE_SYSREG_RW_FUNCS(mdcr_el3)
DEFINE_SYSREG_RW_FUNCS(hstr_el2)
DEFINE_SYSREG_RW_FUNCS(pmcr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmcntenset_el0)
+DEFINE_SYSREG_READ_FUNC(pmccntr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmccfiltr_el0)
+
+DEFINE_SYSREG_RW_FUNCS(pmevtyper0_el0)
+DEFINE_SYSREG_READ_FUNC(pmevcntr0_el0)
/* GICv3 System Registers */
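
DEFINE_SYSREG_RW_FUNCS() and DEFINE_SYSREG_READ_FUNC() are existing TFTF helpers defined earlier in arch_helpers.h (not shown in this hunk); each added line generates mrs/msr accessors for the named register. An approximate expansion for one of them is sketched below; the exact macro bodies are outside this diff, so treat this as an approximation:

	/* Approximate expansion of DEFINE_SYSREG_RW_FUNCS(pmcntenset_el0). */
	static inline u_register_t read_pmcntenset_el0(void)
	{
		u_register_t v;

		__asm__ volatile("mrs %0, pmcntenset_el0" : "=r" (v));
		return v;
	}

	static inline void write_pmcntenset_el0(u_register_t v)
	{
		__asm__ volatile("msr pmcntenset_el0, %0" : : "r" (v));
	}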
diff --git a/tftf/tests/misc_tests/test_pmu_leakage.c b/tftf/tests/misc_tests/test_pmu_leakage.c
new file mode 100644
index 000000000..a0ac82d7e
--- /dev/null
+++ b/tftf/tests/misc_tests/test_pmu_leakage.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file contains tests that try to leak information from the secure world
+ * to the non-secure world (EL2) by using the PMU counters.
+ *
+ * The tests assume that the PMU (PMUv3) is implemented on the target, since
+ * TF-A performs initialization of the PMU and guards against PMU counter
+ * leakage.
+ *
+ * The non-secure world can use system registers to configure the PMU such that
+ * it increments counters in the secure world. Depending on the implemented
+ * features, the secure world can prohibit counting via the following:
+ * -v8.2 Debug not implemented:
+ * |-- Prohibit general event counters and the cycle counter:
+ * MDCR_EL3.SPME == 0 && !ExternalSecureNoninvasiveDebugEnabled()
+ * Since ExternalSecureNoninvasiveDebugEnabled() is a hardware
+ * line, it is not available on FVP and will therefore cause the
+ * tests to fail.
+ * The only other way is to disable the PMCR_EL0.E bit. This will
+ * disable counting altogether, but since this fix is not desired
+ * in TF-A, the tests have to be skipped if v8.2 Debug is not
+ * implemented.
+ *
+ * -v8.2 Debug implemented:
+ * |-- Prohibit general event counters: MDCR_EL3.SPME == 0. This bit
+ * resets to 0, so by default general events should not be counted
+ * in the secure world.
+ * |-- Prohibit cycle counter: MDCR_EL3.SPME == 0 && PMCR_EL0.DP == 1.
+ * This counter is only affected by MDCR_EL3.SPME if the
+ * PMCR_EL0.DP bit is set.
+ *
+ * -v8.5 implemented:
+ * |-- Prohibit general event counters: as in v8.2 Debug.
+ * |-- Prohibit cycle counter: MDCR_EL3.SCCD == 1
+ */
+
+#include <drivers/arm/arm_gic.h>
+#include <irq.h>
+#include <platform.h>
+#include <power_management.h>
+#include <sgi.h>
+#include <string.h>
+#include <test_helpers.h>
+
+#ifdef AARCH64
+#define ITERATIONS_CNT 1000
+
+/*
+ * A maximum of +10% deviation in event counts is tolerated.
+ * This is useful for testing on real hardware where event counts are usually
+ * not the same between runs. The large iteration count should cause the
+ * average event count to converge to values very close to baseline when the
+ * secure world successfully prohibits PMU counters from incrementing.
+ */
+#define ALLOWED_DEVIATION 10
+
+/*
+ * An invalid SMC function number.
+ * Used to establish a base value for PMU counters on each test.
+ */
+#define INVALID_FN 0x666
+
+struct pmu_event_info {
+ unsigned long long min;
+ unsigned long long max;
+ unsigned long long avg;
+};
+
+static inline void configure_pmu_cntr0(const uint32_t event)
+{
+ /*
+ * Disabling the P bit tells the counter to increment at EL1.
+ * Setting the NSK bit to be different from the P bit further tells the
+ * counter NOT to increment at non-secure EL1. Combined with the P bit,
+ * the effect is to tell the counter to increment at secure EL1.
+ * Setting the M bit to be equal to the P bit tells the counter to
+ * increment at EL3.
+ * Disabling the NSH bit tells the counter NOT to increment at
+ * non-secure EL2.
+ * Setting the SH bit to be different to the NSH bit tells the counter
+ * to increment at secure EL2.
+ * The counter is therefore told to count only at Secure EL1, Secure EL2
+ * and EL3. This maximises the accuracy of the results, since we are only
+ * interested in whether the secure world leaks PMU counter values.
+ */
+ write_pmevtyper0_el0(
+ (read_pmevtyper0_el0() | PMEVTYPER_EL0_NSK_BIT |
+ PMEVTYPER_EL0_SH_BIT) &
+ ~(PMEVTYPER_EL0_P_BIT | PMEVTYPER_EL0_NSH_BIT |
+ PMEVTYPER_EL0_M_BIT));
+
+ /*
+ * Write to the EVTCOUNT bits to tell the counter which event to
+ * monitor.
+ */
+ write_pmevtyper0_el0(
+ (read_pmevtyper0_el0() & ~PMEVTYPER_EL0_EVTCOUNT_BITS) | event);
+
+ /* Setting the P[n] bit enables counter n */
+ write_pmcntenset_el0(
+ read_pmcntenset_el0() | PMCNTENSET_EL0_P_BIT(0));
+}
+
+static inline void configure_pmu_cycle_cntr(void)
+{
+ /*
+ * Disabling the P bit tells the counter to increment at EL1.
+ * Setting the NSK bit to be different from the P bit further tells the
+ * counter NOT to increment at non-secure EL1. Combined with the P bit,
+ * the effect is to tell the counter to increment at secure EL1.
+ * Setting the M bit to be equal to the P bit tells the counter to
+ * increment at EL3.
+ * Disabling the NSH bit tells the counter NOT to increment at
+ * non-secure EL2.
+ * Setting the SH bit to be different to the NSH bit tells the counter
+ * to increment at secure EL2.
+ * The counter is therefore told to count only at Secure EL1, Secure EL2
+ * and EL3. This maximises the accuracy of the results, since we are only
+ * interested in whether the secure world leaks PMU counter values.
+ */
+ write_pmccfiltr_el0(
+ (read_pmccfiltr_el0() | PMCCFILTR_EL0_NSK_BIT |
+ PMCCFILTR_EL0_SH_BIT) &
+ ~(PMCCFILTR_EL0_P_BIT | PMCCFILTR_EL0_NSH_BIT |
+ PMCCFILTR_EL0_M_BIT));
+
+ /* Setting the C bit enables the cycle counter in the PMU */
+ write_pmcntenset_el0(
+ read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);
+
+ /*
+ * Disabling the DP bit makes the cycle counter increment where
+ * prohibited by MDCR_EL3.SPME. If higher execution levels don't save
+ * and restore PMCR_EL0, then PMU information will be leaked.
+ */
+ write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_DP_BIT);
+}
+
+static inline void pmu_enable_counting(void)
+{
+ /*
+ * Setting the E bit gives [fine-grained] control to the PMCNTENSET_EL0
+ * register, which controls which counters can increment.
+ */
+ write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
+}
+
+static unsigned long long profile_invalid_smc(u_register_t (*read_cntr_f)(void))
+{
+ unsigned long long evt_cnt;
+ smc_args args = { INVALID_FN };
+
+ evt_cnt = (*read_cntr_f)();
+ tftf_smc(&args);
+ evt_cnt = (*read_cntr_f)() - evt_cnt;
+
+ return evt_cnt;
+}
+
+static unsigned long long profile_cpu_suspend(u_register_t (*read_cntr_f)(void))
+{
+ unsigned long long evt_cnt;
+ unsigned int power_state;
+ unsigned int stateid;
+
+ tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+ PSTATE_TYPE_STANDBY, &stateid);
+ power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+ PSTATE_TYPE_STANDBY, stateid);
+
+ tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+ /*
+ * Mask IRQ to prevent the interrupt handler being invoked
+ * and clearing the interrupt. A pending interrupt will cause this
+ * CPU to wake-up from suspend.
+ */
+ disable_irq();
+
+ /* Configure an SGI to wake-up from suspend */
+ tftf_send_sgi(IRQ_NS_SGI_0,
+ platform_get_core_pos(read_mpidr_el1() & MPID_MASK));
+
+ evt_cnt = (*read_cntr_f)();
+ tftf_cpu_suspend(power_state);
+ evt_cnt = (*read_cntr_f)() - evt_cnt;
+
+	/* Unmask the IRQ to let the interrupt handler execute */
+ enable_irq();
+ isb();
+
+ tftf_irq_disable(IRQ_NS_SGI_0);
+
+ return evt_cnt;
+}
+
+static unsigned long long profile_fast_smc_add(u_register_t (*read_cntr_f)(void))
+{
+ unsigned long long evt_cnt;
+ smc_args args = { TSP_FAST_FID(TSP_ADD), 4, 6 };
+
+ evt_cnt = (*read_cntr_f)();
+ tftf_smc(&args);
+ evt_cnt = (*read_cntr_f)() - evt_cnt;
+
+ return evt_cnt;
+}
+
+static void measure_event(u_register_t (*read_cntr_func)(void),
+ unsigned long long (*profile_func)(u_register_t (*read_cntr_f)(void)),
+ struct pmu_event_info *info)
+{
+ unsigned long long evt_cnt;
+ unsigned long long min_cnt;
+ unsigned long long max_cnt;
+ unsigned long long avg_cnt;
+ unsigned long long cnt_sum = 0;
+
+ min_cnt = UINT64_MAX;
+ max_cnt = 0;
+
+ for (unsigned int i = 0; i < ITERATIONS_CNT; ++i) {
+ evt_cnt = (*profile_func)(read_cntr_func);
+
+ min_cnt = MIN(min_cnt, evt_cnt);
+ max_cnt = MAX(max_cnt, evt_cnt);
+
+ cnt_sum += evt_cnt;
+
+ tftf_irq_disable(IRQ_NS_SGI_0);
+ }
+
+ avg_cnt = cnt_sum / ITERATIONS_CNT;
+
+ info->avg = avg_cnt;
+ info->min = min_cnt;
+ info->max = max_cnt;
+
+ tftf_testcase_printf(
+ "Average count: %llu (ranging from %llu to %llu)\n",
+ avg_cnt,
+ min_cnt,
+ max_cnt);
+}
+
+/*
+ * Measure the number of retired writes to the PC in the PSCI_SUSPEND SMC.
+ * This test only succeeds if no useful information about the PMU counters has
+ * been leaked.
+ */
+test_result_t smc_psci_suspend_pc_write_retired(void)
+{
+ struct pmu_event_info baseline, cpu_suspend;
+
+ SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(
+ ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED);
+
+ configure_pmu_cntr0(PMU_EV_PC_WRITE_RETIRED);
+ pmu_enable_counting();
+
+ tftf_testcase_printf("Getting baseline event count:\n");
+ measure_event(read_pmevcntr0_el0, profile_invalid_smc, &baseline);
+ tftf_testcase_printf("Profiling PSCI_SUSPEND_PC:\n");
+ measure_event(read_pmevcntr0_el0, profile_cpu_suspend, &cpu_suspend);
+
+ if (cpu_suspend.avg - baseline.avg > baseline.avg / ALLOWED_DEVIATION)
+ return TEST_RESULT_FAIL;
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Measure the CPU cycles count of the PSCI_SUSPEND SMC.
+ * This test only succeeds if no useful information about the PMU counters has
+ * been leaked.
+ */
+test_result_t smc_psci_suspend_cycles(void)
+{
+ struct pmu_event_info baseline, cpu_suspend;
+
+ SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(
+ ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED);
+
+ configure_pmu_cycle_cntr();
+ pmu_enable_counting();
+
+ tftf_testcase_printf("Getting baseline event count:\n");
+ measure_event(read_pmccntr_el0, profile_invalid_smc, &baseline);
+ tftf_testcase_printf("Profiling PSCI_SUSPEND_PC:\n");
+ measure_event(read_pmccntr_el0, profile_cpu_suspend, &cpu_suspend);
+
+ if (cpu_suspend.avg - baseline.avg > baseline.avg / ALLOWED_DEVIATION)
+ return TEST_RESULT_FAIL;
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Measure the number of retired writes to the PC in the fast add SMC.
+ * This test only succeeds if no useful information about the PMU counters has
+ * been leaked.
+ */
+test_result_t fast_smc_add_pc_write_retired(void)
+{
+ struct pmu_event_info baseline, fast_smc_add;
+
+ SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(
+ ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED);
+
+ SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+ configure_pmu_cntr0(PMU_EV_PC_WRITE_RETIRED);
+ pmu_enable_counting();
+
+ tftf_testcase_printf("Getting baseline event count:\n");
+ measure_event(read_pmevcntr0_el0, profile_invalid_smc, &baseline);
+ tftf_testcase_printf("Profiling Fast Add SMC:\n");
+ measure_event(read_pmevcntr0_el0, profile_fast_smc_add, &fast_smc_add);
+
+ if (fast_smc_add.avg - baseline.avg > baseline.avg / ALLOWED_DEVIATION)
+ return TEST_RESULT_FAIL;
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Measure the CPU cycles count of the fast add SMC.
+ * This test only succeeds if no useful information about the PMU counters has
+ * been leaked.
+ */
+test_result_t fast_smc_add_cycles(void)
+{
+ struct pmu_event_info baseline, fast_smc_add;
+
+ SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(
+ ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED);
+
+ SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+ configure_pmu_cycle_cntr();
+ pmu_enable_counting();
+
+ tftf_testcase_printf("Getting baseline event count:\n");
+ measure_event(read_pmccntr_el0, profile_invalid_smc, &baseline);
+ tftf_testcase_printf("Profiling Fast Add SMC:\n");
+ measure_event(read_pmccntr_el0, profile_fast_smc_add, &fast_smc_add);
+
+ if (fast_smc_add.avg - baseline.avg > baseline.avg / ALLOWED_DEVIATION)
+ return TEST_RESULT_FAIL;
+ return TEST_RESULT_SUCCESS;
+}
+#else
+test_result_t smc_psci_suspend_pc_write_retired(void)
+{
+ INFO("%s skipped on AArch32\n", __func__);
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t smc_psci_suspend_cycles(void)
+{
+ INFO("%s skipped on AArch32\n", __func__);
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t fast_smc_add_pc_write_retired(void)
+{
+ INFO("%s skipped on AArch32\n", __func__);
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t fast_smc_add_cycles(void)
+{
+ INFO("%s skipped on AArch32\n", __func__);
+ return TEST_RESULT_SKIPPED;
+}
+#endif
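
For context on the file header comment above: the EL3-side mitigation these tests exercise amounts to programming MDCR_EL3 so that counting is prohibited in the Secure state. The sketch below is illustrative only and is not code from this patch; the MDCR_EL3 bit macro names are assumptions based on the comment's description.

	/*
	 * Illustrative EL3 (TF-A) configuration, not part of this patch.
	 * MDCR_SPME_BIT/MDCR_SCCD_BIT are assumed names for MDCR_EL3.SPME
	 * and MDCR_EL3.SCCD.
	 */
	u_register_t mdcr_el3 = read_mdcr_el3();

	/* SPME == 0: general event counters do not count in Secure state. */
	mdcr_el3 &= ~MDCR_SPME_BIT;

	/*
	 * With Armv8.5-PMU, SCCD == 1 also stops the cycle counter in Secure
	 * state; without it, the cycle counter additionally needs
	 * PMCR_EL0.DP == 1 for the prohibition to take effect.
	 */
	mdcr_el3 |= MDCR_SCCD_BIT;

	write_mdcr_el3(mdcr_el3);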
diff --git a/tftf/tests/tests-extensive.xml b/tftf/tests/tests-extensive.xml
index 67d2346be..773c19e0f 100644
--- a/tftf/tests/tests-extensive.xml
+++ b/tftf/tests/tests-extensive.xml
@@ -22,6 +22,7 @@
<!ENTITY tests-cpu-extensions SYSTEM "tests-cpu-extensions.xml">
<!ENTITY tests-performance SYSTEM "tests-performance.xml">
<!ENTITY tests-smc SYSTEM "tests-smc.xml">
+ <!ENTITY tests-pmu-leakage SYSTEM "tests-pmu-leakage.xml">
]>
<testsuites>
@@ -39,5 +40,6 @@
&tests-cpu-extensions;
&tests-performance;
&tests-smc;
+ &tests-pmu-leakage;
</testsuites>
diff --git a/tftf/tests/tests-pmu-leakage.mk b/tftf/tests/tests-pmu-leakage.mk
new file mode 100644
index 000000000..2d46073fc
--- /dev/null
+++ b/tftf/tests/tests-pmu-leakage.mk
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ test_pmu_leakage.c \
+ )
diff --git a/tftf/tests/tests-pmu-leakage.xml b/tftf/tests/tests-pmu-leakage.xml
new file mode 100644
index 000000000..932c21fc4
--- /dev/null
+++ b/tftf/tests/tests-pmu-leakage.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2019, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="PMU Leakage" description="Increment PMU counters in the secure world">
+ <testcase name="Leak PMU PC_WRITE_RETIRED counter values from EL3 on PSCI suspend SMC" function="smc_psci_suspend_pc_write_retired" />
+ <testcase name="Leak PMU CYCLE counter values from EL3 on PSCI suspend SMC" function="smc_psci_suspend_cycles" />
+ <testcase name="Leak PMU PC_WRITE_RETIRED counter values from S_EL1 on fast SMC add" function="fast_smc_add_pc_write_retired" />
+ <testcase name="Leak PMU CYCLE counter values from S_EL1 on fast SMC add" function="fast_smc_add_cycles" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-standard.mk b/tftf/tests/tests-standard.mk
index f249a373c..9ef75bb94 100644
--- a/tftf/tests/tests-standard.mk
+++ b/tftf/tests/tests-standard.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2019, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -10,6 +10,7 @@ TESTS_MAKEFILE := $(addprefix tftf/tests/, \
tests-cpu-extensions.mk \
tests-el3-power-state.mk \
tests-performance.mk \
+ tests-pmu-leakage.mk \
tests-psci.mk \
tests-runtime-instrumentation.mk \
tests-sdei.mk \
diff --git a/tftf/tests/tests-standard.xml b/tftf/tests/tests-standard.xml
index a1323d555..fa5762173 100644
--- a/tftf/tests/tests-standard.xml
+++ b/tftf/tests/tests-standard.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2018, Arm Limited. All rights reserved.
+ Copyright (c) 2018-2019, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
@@ -20,6 +20,7 @@
<!ENTITY tests-cpu-extensions SYSTEM "tests-cpu-extensions.xml">
<!ENTITY tests-performance SYSTEM "tests-performance.xml">
<!ENTITY tests-smc SYSTEM "tests-smc.xml">
+ <!ENTITY tests-pmu-leakage SYSTEM "tests-pmu-leakage.xml">
]>
<testsuites>
@@ -35,5 +36,6 @@
&tests-cpu-extensions;
&tests-performance;
&tests-smc;
+ &tests-pmu-leakage;
</testsuites>