/*
 * Copyright (c) 2019, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * This file contains tests that try to leak information from the secure world
 * to the non-secure world (EL2) by using the PMU counters.
 *
 * The tests assume that the PMU (PMUv3) is implemented on the target, since
 * TF-A performs initialization of the PMU and guards against PMU counter
 * leakage.
 *
 * The non-secure world can use system registers to configure the PMU such that
 * it increments counters in the secure world. Depending on the implemented
 * features, the secure world can prohibit counting via the following:
 *  -v8.2 Debug not implemented:
 *      |-- Prohibit general event counters and the cycle counter:
 *          MDCR_EL3.SPME == 0 && !ExternalSecureNoninvasiveDebugEnabled()
 *          Since ExternalSecureNoninvasiveDebugEnabled() is driven by a
 *          hardware signal, it is not available on FVP and will therefore
 *          cause the tests to fail.
 *          The only other way is to clear the PMCR_EL0.E bit. This disables
 *          counting altogether, but since this fix is not desired in TF-A,
 *          the tests have to be skipped if v8.2 Debug is not implemented.
 *
 *  -v8.2 Debug implemented:
 *      |-- Prohibit general event counters: MDCR_EL3.SPME == 0. This bit
 *          resets to 0, so by default general events should not be counted
 *          in the secure world.
 *      |-- Prohibit cycle counter: MDCR_EL3.SPME == 0 && PMCR_EL0.DP == 1.
 *          This counter is only affected by MDCR_EL3.SPME if the
 *          PMCR_EL0.DP bit is set.
 *
 *  -v8.5 implemented:
 *      |-- Prohibit general event counters: as in v8.2 Debug.
 *      |-- Prohibit cycle counter: MDCR_EL3.SCCD == 1
 */

#include <drivers/arm/arm_gic.h>
#include <irq.h>
#include <platform.h>
#include <power_management.h>
#include <sgi.h>
#include <string.h>
#include <test_helpers.h>

#ifdef AARCH64
#define ITERATIONS_CNT 1000

/*
 * A maximum of +10% deviation in event counts is tolerated.
 * This is useful for testing on real hardware, where event counts usually
 * differ between runs. The large iteration count should cause the average
 * event count to converge to values very close to the baseline when the
 * secure world successfully prohibits PMU counters from incrementing.
 */
#define ALLOWED_DEVIATION 10
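
/*
 * For example, with ALLOWED_DEVIATION == 10 and a baseline average of 1000
 * events, a profiled average of up to 1100 (baseline + baseline / 10) is
 * still accepted by results_within_allowed_margin() below.
 */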

/*
 * An invalid SMC function number.
 * Used to establish a base value for PMU counters on each test.
 */
#define INVALID_FN 0x666

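/*
 * Illustrative sketch only (not used by the tests below): the comment at the
 * top of this file distinguishes Armv8.2 Debug from Armv8.5 (FEAT_PMUv3p5)
 * behaviour. One way for the non-secure world to detect the latter is to read
 * ID_AA64DFR0_EL1.PMUVer (bits [11:8]); the field encodings are taken from the
 * Arm ARM. This assumes a read_id_aa64dfr0_el1() accessor is available from
 * arch_helpers.h.
 */
static inline bool is_pmuv3p5_or_later(void)
{
	/* 0b0110 and above mean FEAT_PMUv3p5+; 0b1111 is IMPLEMENTATION DEFINED */
	unsigned int pmu_ver = (unsigned int)(read_id_aa64dfr0_el1() >> 8) & 0xfU;

	return (pmu_ver >= 0x6U) && (pmu_ver != 0xfU);
}
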
struct pmu_event_info {
	unsigned long long min;
	unsigned long long max;
	unsigned long long avg;
};

static inline void configure_pmu_cntr0(const uint32_t event)
{
	/*
	 * Disabling the P bit tells the counter to increment at EL1.
	 * Setting the NSK bit to be different from the P bit further tells the
	 * counter NOT to increment at non-secure EL1. Combined with the P bit,
	 * the effect is to tell the counter to increment at secure EL1.
	 * Setting the M bit to be equal to the P bit tells the counter to
	 * increment at EL3.
	 * Disabling the NSH bit tells the counter NOT to increment at
	 * non-secure EL2.
	 * Setting the SH bit to be different from the NSH bit tells the
	 * counter to increment at secure EL2.
	 * The counter is therefore told to count only at secure EL1, secure
	 * EL2 and EL3. This ensures maximum accuracy of the results, since we
	 * are only interested in whether the secure world leaks PMU counter
	 * information.
	 */
	write_pmevtyper0_el0(
		(read_pmevtyper0_el0() | PMEVTYPER_EL0_NSK_BIT |
		 PMEVTYPER_EL0_SH_BIT) &
		~(PMEVTYPER_EL0_P_BIT | PMEVTYPER_EL0_NSH_BIT |
		  PMEVTYPER_EL0_M_BIT));

	/*
	 * Write to the EVTCOUNT bits to tell the counter which event to
	 * monitor.
	 */
	write_pmevtyper0_el0(
		(read_pmevtyper0_el0() & ~PMEVTYPER_EL0_EVTCOUNT_BITS) | event);

	/* Setting the P[n] bit enables counter n */
	write_pmcntenset_el0(
		read_pmcntenset_el0() | PMCNTENSET_EL0_P_BIT(0));
}

static inline void configure_pmu_cycle_cntr(void)
{
	/*
	 * Disabling the P bit tells the counter to increment at EL1.
	 * Setting the NSK bit to be different from the P bit further tells the
	 * counter NOT to increment at non-secure EL1. Combined with the P bit,
	 * the effect is to tell the counter to increment at secure EL1.
	 * Setting the M bit to be equal to the P bit tells the counter to
	 * increment at EL3.
	 * Disabling the NSH bit tells the counter NOT to increment at
	 * non-secure EL2.
	 * Setting the SH bit to be different from the NSH bit tells the
	 * counter to increment at secure EL2.
	 * The counter is therefore told to count only at secure EL1, secure
	 * EL2 and EL3. This ensures maximum accuracy of the results, since we
	 * are only interested in whether the secure world leaks PMU counter
	 * information.
	 */
	write_pmccfiltr_el0(
		(read_pmccfiltr_el0() | PMCCFILTR_EL0_NSK_BIT |
		 PMCCFILTR_EL0_SH_BIT) &
		~(PMCCFILTR_EL0_P_BIT | PMCCFILTR_EL0_NSH_BIT |
		  PMCCFILTR_EL0_M_BIT));

	/* Setting the C bit enables the cycle counter in the PMU */
	write_pmcntenset_el0(
		read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);

	/*
	 * Clearing the DP bit makes the cycle counter increment even where
	 * event counting is prohibited by MDCR_EL3.SPME == 0. If higher
	 * exception levels do not save and restore PMCR_EL0, PMU information
	 * will be leaked.
	 */
	write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_DP_BIT);
}

static inline void pmu_enable_counting(void)
{
	/*
	 * Setting the E bit enables the counters selected through
	 * PMCNTENSET_EL0, which provides fine-grained control over which
	 * counters can increment.
	 */
	write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
}

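/*
 * Profile an SMC that uses an unallocated function ID. Such an SMC is
 * expected to be rejected in EL3 without entering a secure payload, so this
 * measurement establishes the baseline cost of an SMC round trip.
 */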
static unsigned long long profile_invalid_smc(u_register_t (*read_cntr_f)(void))
{
	unsigned long long evt_cnt;
	smc_args args = { INVALID_FN };

	evt_cnt = (*read_cntr_f)();
	tftf_smc(&args);
	evt_cnt = (*read_cntr_f)() - evt_cnt;

	return evt_cnt;
}

static unsigned long long profile_cpu_suspend(u_register_t (*read_cntr_f)(void))
{
	unsigned long long evt_cnt;
	unsigned int power_state;
	unsigned int stateid;

	tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
		PSTATE_TYPE_STANDBY, &stateid);
	power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
		PSTATE_TYPE_STANDBY, stateid);

	tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);

	/*
	 * Mask IRQs to prevent the interrupt handler from being invoked
	 * and clearing the interrupt. A pending interrupt will cause this
	 * CPU to wake up from suspend.
	 */
	disable_irq();

	/* Configure an SGI to wake up from suspend */
	tftf_send_sgi(IRQ_NS_SGI_0,
		platform_get_core_pos(read_mpidr_el1() & MPID_MASK));

	evt_cnt = (*read_cntr_f)();
	tftf_cpu_suspend(power_state);
	evt_cnt = (*read_cntr_f)() - evt_cnt;

	/* Unmask IRQs to let the interrupt handler execute */
	enable_irq();
	isb();

	tftf_irq_disable(IRQ_NS_SGI_0);

	return evt_cnt;
}

static unsigned long long profile_fast_smc_add(u_register_t (*read_cntr_f)(void))
{
	unsigned long long evt_cnt;
	smc_args args = { TSP_FAST_FID(TSP_ADD), 4, 6 };

	evt_cnt = (*read_cntr_f)();
	tftf_smc(&args);
	evt_cnt = (*read_cntr_f)() - evt_cnt;

	return evt_cnt;
}

static void measure_event(u_register_t (*read_cntr_func)(void),
		unsigned long long (*profile_func)(u_register_t (*read_cntr_f)(void)),
		struct pmu_event_info *info)
{
	unsigned long long evt_cnt;
	unsigned long long min_cnt;
	unsigned long long max_cnt;
	unsigned long long avg_cnt;
	unsigned long long cnt_sum = 0;

	min_cnt = UINT64_MAX;
	max_cnt = 0;

	for (unsigned int i = 0; i < ITERATIONS_CNT; ++i) {
		evt_cnt = (*profile_func)(read_cntr_func);

		min_cnt = MIN(min_cnt, evt_cnt);
		max_cnt = MAX(max_cnt, evt_cnt);

		cnt_sum += evt_cnt;

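		/*
		 * Ensure the wake-up SGI enabled by profile_cpu_suspend() is
		 * not left enabled between iterations.
		 */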
		tftf_irq_disable(IRQ_NS_SGI_0);
	}

	avg_cnt = cnt_sum / ITERATIONS_CNT;

	info->avg = avg_cnt;
	info->min = min_cnt;
	info->max = max_cnt;

	tftf_testcase_printf(
		"Average count: %llu (ranging from %llu to %llu)\n",
		avg_cnt,
		min_cnt,
		max_cnt);
}

/*
 * Checks that the PMU event count measured around an SMC, relative to a
 * previously taken baseline, is either not greater than the baseline or has
 * increased by no more than baseline / ALLOWED_DEVIATION (i.e. 10%). The
 * first comparison is required because the subtraction would underflow on
 * unsigned types.
 * This is used to determine whether PMU timing information has been leaked
 * from the secure world.
 */
static bool results_within_allowed_margin(unsigned long long baseline_cnt,
		unsigned long long smc_cnt)
{
	return (smc_cnt <= baseline_cnt) ||
	       (smc_cnt - baseline_cnt <= baseline_cnt / ALLOWED_DEVIATION);
}

/*
 * Measure the number of retired writes to the PC in the PSCI_SUSPEND SMC.
 * This test only succeeds if no useful information about the PMU counters has
 * been leaked.
 */
test_result_t smc_psci_suspend_pc_write_retired(void)
{
	struct pmu_event_info baseline, cpu_suspend;

	SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(
		ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED);

	configure_pmu_cntr0(PMU_EV_PC_WRITE_RETIRED);
	pmu_enable_counting();

	tftf_testcase_printf("Getting baseline event count:\n");
	measure_event(read_pmevcntr0_el0, profile_invalid_smc, &baseline);
	tftf_testcase_printf("Profiling PSCI_SUSPEND_PC:\n");
	measure_event(read_pmevcntr0_el0, profile_cpu_suspend, &cpu_suspend);

	if (!results_within_allowed_margin(baseline.avg, cpu_suspend.avg))
		return TEST_RESULT_FAIL;
	return TEST_RESULT_SUCCESS;
}

/*
 * Measure the CPU cycle count of the PSCI_SUSPEND SMC.
 * This test only succeeds if no useful information about the PMU counters has
 * been leaked.
 */
test_result_t smc_psci_suspend_cycles(void)
{
	struct pmu_event_info baseline, cpu_suspend;

	SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(
		ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED);

	configure_pmu_cycle_cntr();
	pmu_enable_counting();

	tftf_testcase_printf("Getting baseline event count:\n");
	measure_event(read_pmccntr_el0, profile_invalid_smc, &baseline);
	tftf_testcase_printf("Profiling PSCI_SUSPEND_PC:\n");
	measure_event(read_pmccntr_el0, profile_cpu_suspend, &cpu_suspend);

	if (!results_within_allowed_margin(baseline.avg, cpu_suspend.avg))
		return TEST_RESULT_FAIL;
	return TEST_RESULT_SUCCESS;
}

/*
 * Measure the number of retired writes to the PC in the fast add SMC.
 * This test only succeeds if no useful information about the PMU counters has
 * been leaked.
 */
test_result_t fast_smc_add_pc_write_retired(void)
{
	struct pmu_event_info baseline, fast_smc_add;

	SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(
		ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED);

	SKIP_TEST_IF_TSP_NOT_PRESENT();

	configure_pmu_cntr0(PMU_EV_PC_WRITE_RETIRED);
	pmu_enable_counting();

	tftf_testcase_printf("Getting baseline event count:\n");
	measure_event(read_pmevcntr0_el0, profile_invalid_smc, &baseline);
	tftf_testcase_printf("Profiling Fast Add SMC:\n");
	measure_event(read_pmevcntr0_el0, profile_fast_smc_add, &fast_smc_add);

	if (!results_within_allowed_margin(baseline.avg, fast_smc_add.avg))
		return TEST_RESULT_FAIL;
	return TEST_RESULT_SUCCESS;
}

/*
 * Measure the CPU cycle count of the fast add SMC.
 * This test only succeeds if no useful information about the PMU counters has
 * been leaked.
 */
test_result_t fast_smc_add_cycles(void)
{
	struct pmu_event_info baseline, fast_smc_add;

	SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(
		ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED);

	SKIP_TEST_IF_TSP_NOT_PRESENT();

	configure_pmu_cycle_cntr();
	pmu_enable_counting();

	tftf_testcase_printf("Getting baseline event count:\n");
	measure_event(read_pmccntr_el0, profile_invalid_smc, &baseline);
	tftf_testcase_printf("Profiling Fast Add SMC:\n");
	measure_event(read_pmccntr_el0, profile_fast_smc_add, &fast_smc_add);

	if (!results_within_allowed_margin(baseline.avg, fast_smc_add.avg))
		return TEST_RESULT_FAIL;
	return TEST_RESULT_SUCCESS;
}
#else
test_result_t smc_psci_suspend_pc_write_retired(void)
{
	INFO("%s skipped on AArch32\n", __func__);
	return TEST_RESULT_SKIPPED;
}

test_result_t smc_psci_suspend_cycles(void)
{
	INFO("%s skipped on AArch32\n", __func__);
	return TEST_RESULT_SKIPPED;
}

test_result_t fast_smc_add_pc_write_retired(void)
{
	INFO("%s skipped on AArch32\n", __func__);
	return TEST_RESULT_SKIPPED;
}

test_result_t fast_smc_add_cycles(void)
{
	INFO("%s skipped on AArch32\n", __func__);
	return TEST_RESULT_SKIPPED;
}
#endif