/*
 * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * This file contains tests that try to leak information from the secure world
 * to the non-secure world (EL2) by using the PMU counters.
 *
 * The tests assume that the PMU (PMUv3) is implemented on the target, since
 * TF-A performs initialization of the PMU and guards against PMU counter
 * leakage.
 *
 * The non-secure world can use system registers to configure the PMU such that
 * it increments counters in the secure world. Depending on the implemented
 * features, the secure world can prohibit counting via the following:
 * -v8.2 Debug not implemented:
 *       |-- Prohibit general event counters and the cycle counter:
 *           MDCR_EL3.SPME == 0 && !ExternalSecureNoninvasiveDebugEnabled()
 *           Since ExternalSecureNoninvasiveDebugEnabled() is a hardware
 *           line, it is not available on FVP and will therefore cause the
 *           tests to fail.
 *           The only other option is to clear the PMCR_EL0.E bit, which
 *           disables counting altogether. Since that fix is not desired
 *           in TF-A, the tests have to be skipped if v8.2 Debug is not
 *           implemented.
 *
 * -v8.2 Debug implemented:
 *       |-- Prohibit general event counters: MDCR_EL3.SPME == 0. This bit
 *           resets to 0, so by default general events should not be counted
 *           in the secure world.
 *       |-- Prohibit cycle counter: MDCR_EL3.SPME == 0 && PMCR_EL0.DP == 1.
 *           This counter is only affected by MDCR_EL3.SPME if the
 *           PMCR_EL0.DP bit is set.
 *
 * -v8.5 implemented:
 *       |-- Prohibit general event counters: as in v8.2 Debug.
 *       |-- Prohibit cycle counter: MDCR_EL3.SCCD == 1
 *
 * In AArch32 state the PMU registers have identical names (apart from the
 * '_EL0' suffix) and bit fields. As far as the PMU is concerned, the AArch32
 * counterpart of MDCR_EL3 is the SDCR register, which has both the SCCD and
 * SPME bits.
 */
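
/*
 * For reference, the EL3 firmware side of the mitigation described above
 * amounts to something like the sketch below. This is purely illustrative
 * (TFTF runs in the non-secure world and cannot access MDCR_EL3), and the
 * accessor and bit names are assumptions modelled on TF-A's conventions;
 * TF-A's real implementation lives in its EL3 context management code:
 *
 *     uint64_t mdcr_el3 = read_mdcr_el3();
 *     mdcr_el3 &= ~MDCR_SPME_BIT;   // prohibit general event counters in
 *                                   // the secure world (v8.2 Debug)
 *     mdcr_el3 |= MDCR_SCCD_BIT;    // v8.5: also prohibit the cycle counter
 *     write_mdcr_el3(mdcr_el3);
 */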

#include <drivers/arm/arm_gic.h>
#include <irq.h>
#include <platform.h>
#include <power_management.h>
#include <sgi.h>
#include <string.h>
#include <test_helpers.h>

#define ITERATIONS_CNT 1000

/*
 * A maximum of +10% deviation in event counts is tolerated.
 * This is useful for testing on real hardware where event counts are usually
 * not the same between runs. The large iteration count should cause the
 * average event count to converge to values very close to baseline when the
 * secure world successfully prohibits PMU counters from incrementing.
 */
#define ALLOWED_DEVIATION 10

/*
 * An invalid SMC function number.
 * Used to establish a base value for PMU counters on each test.
 */
#define INVALID_FN 0x666

struct pmu_event_info {
        unsigned long long min;
        unsigned long long max;
        unsigned long long avg;
};

#ifdef __aarch64__
#define V8_2_DEBUG_ARCH_SUPPORTED ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED
#else
#define V8_2_DEBUG_ARCH_SUPPORTED DBGDIDR_V8_2_DEBUG_ARCH_SUPPORTED
#endif

static inline void configure_pmu_cntr0(const uint32_t event)
{
        /*
         * Disabling the P bit tells the counter to increment at EL1.
         * Setting the NSK bit to be different from the P bit further tells the
         * counter NOT to increment at non-secure EL1. Combined with the P bit,
         * the effect is to tell the counter to increment at secure EL1.
         * Setting the M bit to be equal to the P bit tells the counter to
         * increment at EL3.
         * Disabling the NSH bit tells the counter NOT to increment at
         * non-secure EL2.
         * Setting the SH bit to be different to the NSH bit tells the counter
         * to increment at secure EL2.
         * The counter therefore is told to count only at secure EL1, secure EL2
         * and EL3. This is to ensure maximum accuracy of the results, since we
         * are only interested if the secure world is leaking PMU counters.
         */
        write_pmevtyper0_el0(
                (read_pmevtyper0_el0() | PMEVTYPER_EL0_NSK_BIT |
                PMEVTYPER_EL0_SH_BIT) &
                ~(PMEVTYPER_EL0_P_BIT | PMEVTYPER_EL0_NSH_BIT |
                PMEVTYPER_EL0_M_BIT));

        /*
         * Write to the EVTCOUNT bits to tell the counter which event to
         * monitor.
         */
        write_pmevtyper0_el0(
                (read_pmevtyper0_el0() & ~PMEVTYPER_EL0_EVTCOUNT_BITS) | event);

        /* Setting the P[n] bit enables counter n */
        write_pmcntenset_el0(
                read_pmcntenset_el0() | PMCNTENSET_EL0_P_BIT(0));
}

static inline void configure_pmu_cycle_cntr(void)
{
        /*
         * Disabling the P bit tells the counter to increment at EL1.
         * Setting the NSK bit to be different from the P bit further tells the
         * counter NOT to increment at non-secure EL1. Combined with the P bit,
         * the effect is to tell the counter to increment at secure EL1.
         * Setting the M bit to be equal to the P bit tells the counter to
         * increment at EL3.
         * Disabling the NSH bit tells the counter NOT to increment at
         * non-secure EL2.
         * Setting the SH bit to be different to the NSH bit tells the counter
         * to increment at secure EL2.
         * The counter therefore is told to count only at secure EL1, secure EL2
         * and EL3. This is to ensure maximum accuracy of the results, since we
         * are only interested if the secure world is leaking PMU counters.
         */
        write_pmccfiltr_el0(
                (read_pmccfiltr_el0() | PMCCFILTR_EL0_NSK_BIT |
                PMCCFILTR_EL0_SH_BIT) &
                ~(PMCCFILTR_EL0_P_BIT | PMCCFILTR_EL0_NSH_BIT |
                PMCCFILTR_EL0_M_BIT));

        /* Setting the C bit enables the cycle counter in the PMU */
        write_pmcntenset_el0(
                read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);

        /*
         * Clearing the DP bit makes the cycle counter keep incrementing even
         * where event counting is prohibited by MDCR_EL3.SPME. If higher
         * exception levels don't save and restore PMCR_EL0, then PMU
         * information will be leaked.
         */
        write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_DP_BIT);
}

static inline void pmu_enable_counting(void)
{
        /*
         * Setting the E bit hands fine-grained control to the PMCNTENSET_EL0
         * register, which controls which individual counters can increment.
         */
        write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
}

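/*
 * Profile an SMC with an invalid function ID. Such an SMC should return from
 * the secure world almost immediately, so the measured count serves as the
 * baseline cost of a round trip through EL3.
 */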
static unsigned long long profile_invalid_smc(u_register_t (*read_cntr_f)(void))
{
        unsigned long long evt_cnt;
        smc_args args = { INVALID_FN };

        evt_cnt = (*read_cntr_f)();
        tftf_smc(&args);
        evt_cnt = (*read_cntr_f)() - evt_cnt;

        return evt_cnt;
}

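/*
 * Profile a PSCI CPU_SUSPEND (standby) request. A self-targeted SGI is left
 * pending while IRQs are masked, so the suspend call returns immediately and
 * the counter is read around the suspend/resume round trip.
 */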
static unsigned long long profile_cpu_suspend(u_register_t (*read_cntr_f)(void))
{
        unsigned long long evt_cnt;
        unsigned int power_state;
        unsigned int stateid;

        tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
                PSTATE_TYPE_STANDBY, &stateid);
        power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
                PSTATE_TYPE_STANDBY, stateid);

        tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);

        /*
         * Mask IRQs to prevent the interrupt handler from being invoked
         * and clearing the interrupt. A pending interrupt will cause this
         * CPU to wake up from suspend.
         */
        disable_irq();

        /* Configure an SGI to wake up from suspend */
        tftf_send_sgi(IRQ_NS_SGI_0,
                platform_get_core_pos(read_mpidr_el1() & MPID_MASK));

        evt_cnt = (*read_cntr_f)();
        tftf_cpu_suspend(power_state);
        evt_cnt = (*read_cntr_f)() - evt_cnt;

        /* Unmask IRQs to let the interrupt handler execute */
        enable_irq();
        isb();

        tftf_irq_disable(IRQ_NS_SGI_0);

        return evt_cnt;
}

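/*
 * Profile a fast SMC to the TSP that adds two operands. This exercises a
 * round trip through EL3 into the secure payload and back.
 */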
static unsigned long long profile_fast_smc_add(u_register_t (*read_cntr_f)(void))
{
        unsigned long long evt_cnt;
        smc_args args = { TSP_FAST_FID(TSP_ADD), 4, 6 };

        evt_cnt = (*read_cntr_f)();
        tftf_smc(&args);
        evt_cnt = (*read_cntr_f)() - evt_cnt;

        return evt_cnt;
}

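/*
 * Run profile_func ITERATIONS_CNT times, reading the PMU counter selected by
 * read_cntr_func around each run, and report the minimum, maximum and average
 * event counts through info.
 */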
static void measure_event(u_register_t (*read_cntr_func)(void),
        unsigned long long (*profile_func)(u_register_t (*read_cntr_f)(void)),
        struct pmu_event_info *info)
{
        unsigned long long evt_cnt;
        unsigned long long min_cnt;
        unsigned long long max_cnt;
        unsigned long long avg_cnt;
        unsigned long long cnt_sum = 0;

        min_cnt = UINT64_MAX;
        max_cnt = 0;

        for (unsigned int i = 0; i < ITERATIONS_CNT; ++i) {
                evt_cnt = (*profile_func)(read_cntr_func);

                min_cnt = MIN(min_cnt, evt_cnt);
                max_cnt = MAX(max_cnt, evt_cnt);

                cnt_sum += evt_cnt;

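                /*
                 * Make sure the SGI used by profile_cpu_suspend() is disabled
                 * again before the next iteration.
                 */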
                tftf_irq_disable(IRQ_NS_SGI_0);
        }

        avg_cnt = cnt_sum / ITERATIONS_CNT;

        info->avg = avg_cnt;
        info->min = min_cnt;
        info->max = max_cnt;

        tftf_testcase_printf(
                "Average count: %llu (ranging from %llu to %llu)\n",
                avg_cnt,
                min_cnt,
                max_cnt);
}

/*
 * Checks whether the PMU event count measured around an SMC call stays within
 * the allowed margin of a previously taken baseline count: the SMC count must
 * either be no greater than the baseline or exceed it by no more than
 * ALLOWED_DEVIATION%. The first comparison is needed because the counts are
 * unsigned, so the subtraction would otherwise underflow.
 * This is used to determine if PMU timing information has been leaked from the
 * secure world.
 */
static bool results_within_allowed_margin(unsigned long long baseline_cnt,
        unsigned long long smc_cnt)
{
        return (smc_cnt <= baseline_cnt) ||
                (smc_cnt - baseline_cnt <= baseline_cnt / ALLOWED_DEVIATION);
}
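
/*
 * Worked example (illustrative figures): with a baseline average of 1000
 * events, anything up to 1000 + 1000 / ALLOWED_DEVIATION = 1100 events for
 * the profiled SMC is accepted as "no leak".
 */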

/*
 * Measure the number of retired writes to the PC in the PSCI_SUSPEND SMC.
 * This test only succeeds if no useful information about the PMU counters has
 * been leaked.
 */
test_result_t smc_psci_suspend_pc_write_retired(void)
{
#if ARM_ARCH_MAJOR < 8
        INFO("%s skipped on ARMv7 and earlier\n", __func__);
        return TEST_RESULT_SKIPPED;
#else
        struct pmu_event_info baseline, cpu_suspend;

        SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(V8_2_DEBUG_ARCH_SUPPORTED);

        configure_pmu_cntr0(PMU_EV_PC_WRITE_RETIRED);
        pmu_enable_counting();

        tftf_testcase_printf("Getting baseline event count:\n");
        measure_event(read_pmevcntr0_el0, profile_invalid_smc, &baseline);
        tftf_testcase_printf("Profiling PSCI_SUSPEND_PC:\n");
        measure_event(read_pmevcntr0_el0, profile_cpu_suspend, &cpu_suspend);

        if (!results_within_allowed_margin(baseline.avg, cpu_suspend.avg))
                return TEST_RESULT_FAIL;
        return TEST_RESULT_SUCCESS;
#endif
}

/*
 * Measure the CPU cycle count of the PSCI_SUSPEND SMC.
 * This test only succeeds if no useful information about the PMU counters has
 * been leaked.
 */
test_result_t smc_psci_suspend_cycles(void)
{
#if ARM_ARCH_MAJOR < 8
        INFO("%s skipped on ARMv7 and earlier\n", __func__);
        return TEST_RESULT_SKIPPED;
#else
        struct pmu_event_info baseline, cpu_suspend;

        SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(V8_2_DEBUG_ARCH_SUPPORTED);

        configure_pmu_cycle_cntr();
        pmu_enable_counting();

        tftf_testcase_printf("Getting baseline event count:\n");
        measure_event(read_pmccntr_el0, profile_invalid_smc, &baseline);
        tftf_testcase_printf("Profiling PSCI_SUSPEND_PC:\n");
        measure_event(read_pmccntr_el0, profile_cpu_suspend, &cpu_suspend);

        if (!results_within_allowed_margin(baseline.avg, cpu_suspend.avg))
                return TEST_RESULT_FAIL;
        return TEST_RESULT_SUCCESS;
#endif
}

/*
 * Measure the number of retired writes to the PC in the fast add SMC.
 * This test only succeeds if no useful information about the PMU counters has
 * been leaked.
 */
test_result_t fast_smc_add_pc_write_retired(void)
{
#if ARM_ARCH_MAJOR < 8
        INFO("%s skipped on ARMv7 and earlier\n", __func__);
        return TEST_RESULT_SKIPPED;
#else
        struct pmu_event_info baseline, fast_smc_add;

        SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(V8_2_DEBUG_ARCH_SUPPORTED);

        SKIP_TEST_IF_TSP_NOT_PRESENT();

        configure_pmu_cntr0(PMU_EV_PC_WRITE_RETIRED);
        pmu_enable_counting();

        tftf_testcase_printf("Getting baseline event count:\n");
        measure_event(read_pmevcntr0_el0, profile_invalid_smc, &baseline);
        tftf_testcase_printf("Profiling Fast Add SMC:\n");
        measure_event(read_pmevcntr0_el0, profile_fast_smc_add, &fast_smc_add);

        if (!results_within_allowed_margin(baseline.avg, fast_smc_add.avg))
                return TEST_RESULT_FAIL;
        return TEST_RESULT_SUCCESS;
#endif
}

/*
 * Measure the CPU cycle count of the fast add SMC.
 * This test only succeeds if no useful information about the PMU counters has
 * been leaked.
 */
test_result_t fast_smc_add_cycles(void)
{
#if ARM_ARCH_MAJOR < 8
        INFO("%s skipped on ARMv7 and earlier\n", __func__);
        return TEST_RESULT_SKIPPED;
#else
        struct pmu_event_info baseline, fast_smc_add;

        SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(V8_2_DEBUG_ARCH_SUPPORTED);

        SKIP_TEST_IF_TSP_NOT_PRESENT();

        configure_pmu_cycle_cntr();
        pmu_enable_counting();

        tftf_testcase_printf("Getting baseline event count:\n");
        measure_event(read_pmccntr_el0, profile_invalid_smc, &baseline);
        tftf_testcase_printf("Profiling Fast Add SMC:\n");
        measure_event(read_pmccntr_el0, profile_fast_smc_add, &fast_smc_add);

        if (!results_within_allowed_margin(baseline.avg, fast_smc_add.avg))
                return TEST_RESULT_FAIL;
        return TEST_RESULT_SUCCESS;
#endif
}