blob: 82b81f4eca1128899cc74465b20431ef30f25d0e [file] [log] [blame]
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +01001/*
Boyan Karatotev794b0ac2025-06-20 13:13:29 +01002 * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7/*
8 * This file contains tests that try to leak information from the secure world
9 * to the non-secure world (EL2) by using the PMU counters.
10 *
11 * The tests assume that the PMU (PMUv3) is implemented on the target, since
12 * TF-A performs initialization of the PMU and guards against PMU counter
13 * leakage.
14 *
15 * The non-secure world can use system registers to configure the PMU such that
16 * it increments counters in the secure world. Depending on the implemented
17 * features, the secure world can prohibit counting via the following:
18 * -v8.2 Debug not implemented:
19 * |-- Prohibit general event counters and the cycle counter:
20 * MDCR_EL3.SPME == 0 && !ExternalSecureNoninvasiveDebugEnabled()
21 * Since ExternalSecureNoninvasiveDebugEnabled() is a hardware
22 * line, it is not available on FVP and will therefore cause the
23 * tests to fail.
24 * The only other way is to disable the PMCR_EL0.E bit. This will
25 * disable counting altogether, but since this fix is not desired
26 * in TF-A, the tests have to be skipped if v8.2 Debug is not
27 * implemented.
28 *
29 * -v8.2 Debug implemented:
30 * |-- Prohibit general event counters: MDCR_EL3.SPME == 0. This bit
31 * resets to 0, so by default general events should not be counted
32 * in the secure world.
33 * |-- Prohibit cycle counter: MDCR_EL3.SPME == 0 && PMCR_EL0.DP == 1.
34 * This counter is only affected by MDCR_EL3.SPME if the
35 * PMCR_EL0.DP bit is set.
36 *
37 * -v8.5 implemented:
38 * |-- Prohibit general event counters: as in v8.2 Debug.
39 * |-- Prohibit cycle counter: MDCR_EL3.SCCD == 1
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +010040 *
 * In AArch32 state the PMU registers have identical names (apart from the
 * '_EL0' suffix) and bit fields. As far as the PMU is concerned, the AArch32
 * counterpart of MDCR_EL3 is the SDCR register, which has both the SCCD and
 * SPME bits.
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +010045 */
46
47#include <drivers/arm/arm_gic.h>
48#include <irq.h>
49#include <platform.h>
50#include <power_management.h>
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +010051#include <string.h>
52#include <test_helpers.h>
53
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +010054#define ITERATIONS_CNT 1000
55
56/*
57 * A maximum of +10% deviation in event counts is tolerated.
58 * This is useful for testing on real hardware where event counts are usually
59 * not the same between runs. The large iteration count should cause the
60 * average event count to converge to values very close to baseline when the
61 * secure world successfully prohibits PMU counters from incrementing.
62 */
63#define ALLOWED_DEVIATION 10
64
65/*
66 * An invalid SMC function number.
67 * Used to establish a base value for PMU counters on each test.
68 */
69#define INVALID_FN 0x666
70
/*
 * Statistics for one PMU event, gathered over ITERATIONS_CNT profiling runs
 * by measure_event().
 */
struct pmu_event_info {
	unsigned long long min;	/* smallest per-iteration event count observed */
	unsigned long long max;	/* largest per-iteration event count observed */
	unsigned long long avg;	/* mean event count across all iterations */
};
76
#ifdef __aarch64__
/* AArch64: v8.2 Debug support is reported via the ID_AA64DFR0 debug field. */
#define V8_2_DEBUG_ARCH_SUPPORTED ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED
#else
/* AArch32: the equivalent information comes from DBGDIDR. */
#define V8_2_DEBUG_ARCH_SUPPORTED DBGDIDR_V8_2_DEBUG_ARCH_SUPPORTED
#endif
82
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +010083static inline void configure_pmu_cntr0(const uint32_t event)
84{
85 /*
86 * Disabling the P bit tells the counter to increment at EL1.
87 * Setting the NSK bit to be different from the P bit further tells the
88 * counter NOT to increment at non-secure EL1. Combined with the P bit,
89 * the effect is to tell the counter to increment at secure EL1.
90 * Setting the M bit to be equal to the P bit tells the counter to
91 * increment at EL3.
92 * Disabling the NSH bit tells the counter NOT to increment at
93 * non-secure EL2.
94 * Setting the SH bit to be different to the NSH bit tells the counter
95 * to increment at secure EL2.
96 * The counter therefore is told to count only at secure EL1, secure EL2
97 * and EL3. This is to ensure maximum accuracy of the results, since we
98 * are only interested if the secure world is leaking PMU counters.
99 */
100 write_pmevtyper0_el0(
101 (read_pmevtyper0_el0() | PMEVTYPER_EL0_NSK_BIT |
102 PMEVTYPER_EL0_SH_BIT) &
103 ~(PMEVTYPER_EL0_P_BIT | PMEVTYPER_EL0_NSH_BIT |
104 PMEVTYPER_EL0_M_BIT));
105
106 /*
107 * Write to the EVTCOUNT bits to tell the counter which event to
108 * monitor.
109 */
110 write_pmevtyper0_el0(
111 (read_pmevtyper0_el0() & ~PMEVTYPER_EL0_EVTCOUNT_BITS) | event);
112
113 /* Setting the P[n] bit enables counter n */
114 write_pmcntenset_el0(
115 read_pmcntenset_el0() | PMCNTENSET_EL0_P_BIT(0));
116}
117
118static inline void configure_pmu_cycle_cntr(void)
119{
120 /*
121 * Disabling the P bit tells the counter to increment at EL1.
122 * Setting the NSK bit to be different from the P bit further tells the
123 * counter NOT to increment at non-secure EL1. Combined with the P bit,
124 * the effect is to tell the counter to increment at secure EL1.
125 * Setting the M bit to be equal to the P bit tells the counter to
126 * increment at EL3.
127 * Disabling the NSH bit tells the counter NOT to increment at
128 * non-secure EL2.
129 * Setting the SH bit to be different to the NSH bit tells the counter
130 * to increment at secure EL2.
131 * The counter therefore is told to count only at secure EL1, secure EL2
132 * and EL3. This is to ensure maximum accuracy of the results, since we
133 * are only interested if the secure world is leaking PMU counters.
134 */
135 write_pmccfiltr_el0(
136 (read_pmccfiltr_el0() | PMCCFILTR_EL0_NSK_BIT |
137 PMCCFILTR_EL0_SH_BIT) &
138 ~(PMCCFILTR_EL0_P_BIT | PMCCFILTR_EL0_NSH_BIT |
139 PMCCFILTR_EL0_M_BIT));
140
141 /* Setting the C bit enables the cycle counter in the PMU */
142 write_pmcntenset_el0(
143 read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);
144
145 /*
146 * Disabling the DP bit makes the cycle counter increment where
147 * prohibited by MDCR_EL3.SPME. If higher execution levels don't save
148 * and restore PMCR_EL0, then PMU information will be leaked.
149 */
150 write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_DP_BIT);
151}
152
153static inline void pmu_enable_counting(void)
154{
155 /*
156 * Setting the E bit gives [fine-grained] control to the PMCNTENSET_EL0
157 * register, which controls which counters can increment.
158 */
159 write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
160}
161
162static unsigned long long profile_invalid_smc(u_register_t (*read_cntr_f)(void))
163{
164 unsigned long long evt_cnt;
165 smc_args args = { INVALID_FN };
166
167 evt_cnt = (*read_cntr_f)();
168 tftf_smc(&args);
169 evt_cnt = (*read_cntr_f)() - evt_cnt;
170
171 return evt_cnt;
172}
173
/*
 * Measure the counter delta (via read_cntr_f) across a PSCI CPU_SUSPEND call
 * that puts this core into standby.
 *
 * A self-targeted SGI is made pending while IRQs are masked at the PE, so
 * the suspend call returns promptly (the pending interrupt is the wake-up
 * source) without the interrupt handler consuming it first.
 *
 * Returns the raw counter delta measured around the suspend call.
 */
static unsigned long long profile_cpu_suspend(u_register_t (*read_cntr_f)(void))
{
	unsigned long long evt_cnt;
	unsigned int power_state;
	unsigned int stateid;

	/* Build a composite standby power state for affinity level 0 */
	tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
					PSTATE_TYPE_STANDBY, &stateid);
	power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
					PSTATE_TYPE_STANDBY, stateid);

	tftf_irq_enable_sgi(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);

	/*
	 * Mask IRQ to prevent the interrupt handler being invoked
	 * and clearing the interrupt. A pending interrupt will cause this
	 * CPU to wake-up from suspend.
	 */
	disable_irq();

	/* Configure an SGI to wake-up from suspend */
	tftf_send_sgi(IRQ_NS_SGI_0,
		platform_get_core_pos(read_mpidr_el1() & MPID_MASK));

	evt_cnt = (*read_cntr_f)();
	tftf_cpu_suspend(power_state);
	evt_cnt = (*read_cntr_f)() - evt_cnt;

	/* Unmask the IRQ to let the interrupt handler to execute */
	enable_irq();
	isb();

	tftf_irq_disable_sgi(IRQ_NS_SGI_0);

	return evt_cnt;
}
210
211static unsigned long long profile_fast_smc_add(u_register_t (*read_cntr_f)(void))
212{
213 unsigned long long evt_cnt;
214 smc_args args = { TSP_FAST_FID(TSP_ADD), 4, 6 };
215
216 evt_cnt = (*read_cntr_f)();
217 tftf_smc(&args);
218 evt_cnt = (*read_cntr_f)() - evt_cnt;
219
220 return evt_cnt;
221}
222
223static void measure_event(u_register_t (*read_cntr_func)(void),
224 unsigned long long (*profile_func)(u_register_t (*read_cntr_f)(void)),
225 struct pmu_event_info *info)
226{
227 unsigned long long evt_cnt;
228 unsigned long long min_cnt;
229 unsigned long long max_cnt;
230 unsigned long long avg_cnt;
231 unsigned long long cnt_sum = 0;
232
233 min_cnt = UINT64_MAX;
234 max_cnt = 0;
235
236 for (unsigned int i = 0; i < ITERATIONS_CNT; ++i) {
237 evt_cnt = (*profile_func)(read_cntr_func);
238
239 min_cnt = MIN(min_cnt, evt_cnt);
240 max_cnt = MAX(max_cnt, evt_cnt);
241
242 cnt_sum += evt_cnt;
243
Boyan Karatotev6d144db2025-06-23 15:04:53 +0100244 tftf_irq_disable_sgi(IRQ_NS_SGI_0);
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100245 }
246
247 avg_cnt = cnt_sum / ITERATIONS_CNT;
248
249 info->avg = avg_cnt;
250 info->min = min_cnt;
251 info->max = max_cnt;
252
253 tftf_testcase_printf(
254 "Average count: %llu (ranging from %llu to %llu)\n",
255 avg_cnt,
256 min_cnt,
257 max_cnt);
258}
259
260/*
Petre-Ionut Tudor85926bc2019-10-09 10:56:39 +0100261 * Checks that when requesting an SMC call after getting a baseline PMU event
262 * count the number of PMU events counted is either not greater than the
263 * baseline or it has increased by no more than ALLOWED_DEVIATION%. The first
264 * comparison is required because of underflow on unsigned types.
265 * This is used to determine if PMU timing information has been leaked from the
266 * secure world.
267 */
268static bool results_within_allowed_margin(unsigned long long baseline_cnt,
269 unsigned long long smc_cnt)
270{
271 return (smc_cnt <= baseline_cnt) ||
272 (smc_cnt - baseline_cnt <= baseline_cnt / ALLOWED_DEVIATION);
273}
274
275/*
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100276 * Measure the number of retired writes to the PC in the PSCI_SUSPEND SMC.
277 * This test only succeeds if no useful information about the PMU counters has
278 * been leaked.
279 */
280test_result_t smc_psci_suspend_pc_write_retired(void)
281{
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100282#if ARM_ARCH_MAJOR < 8
283 INFO("%s skipped on ARMv7 and earlier\n", __func__);
284 return TEST_RESULT_SKIPPED;
285#else
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100286 struct pmu_event_info baseline, cpu_suspend;
287
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100288 SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(V8_2_DEBUG_ARCH_SUPPORTED);
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100289
290 configure_pmu_cntr0(PMU_EV_PC_WRITE_RETIRED);
291 pmu_enable_counting();
292
293 tftf_testcase_printf("Getting baseline event count:\n");
294 measure_event(read_pmevcntr0_el0, profile_invalid_smc, &baseline);
295 tftf_testcase_printf("Profiling PSCI_SUSPEND_PC:\n");
296 measure_event(read_pmevcntr0_el0, profile_cpu_suspend, &cpu_suspend);
297
Petre-Ionut Tudor85926bc2019-10-09 10:56:39 +0100298 if (!results_within_allowed_margin(baseline.avg, cpu_suspend.avg))
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100299 return TEST_RESULT_FAIL;
300 return TEST_RESULT_SUCCESS;
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100301#endif
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100302}
303
304/*
305 * Measure the CPU cycles count of the PSCI_SUSPEND SMC.
306 * This test only succeeds if no useful information about the PMU counters has
307 * been leaked.
308 */
309test_result_t smc_psci_suspend_cycles(void)
310{
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100311#if ARM_ARCH_MAJOR < 8
312 INFO("%s skipped on ARMv7 and earlier\n", __func__);
313 return TEST_RESULT_SKIPPED;
314#else
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100315 struct pmu_event_info baseline, cpu_suspend;
316
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100317 SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(V8_2_DEBUG_ARCH_SUPPORTED);
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100318
319 configure_pmu_cycle_cntr();
320 pmu_enable_counting();
321
322 tftf_testcase_printf("Getting baseline event count:\n");
323 measure_event(read_pmccntr_el0, profile_invalid_smc, &baseline);
324 tftf_testcase_printf("Profiling PSCI_SUSPEND_PC:\n");
325 measure_event(read_pmccntr_el0, profile_cpu_suspend, &cpu_suspend);
326
Petre-Ionut Tudor85926bc2019-10-09 10:56:39 +0100327 if (!results_within_allowed_margin(baseline.avg, cpu_suspend.avg))
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100328 return TEST_RESULT_FAIL;
329 return TEST_RESULT_SUCCESS;
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100330#endif
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100331}
332
333/*
334 * Measure the number of retired writes to the PC in the fast add SMC.
335 * This test only succeeds if no useful information about the PMU counters has
336 * been leaked.
337 */
338test_result_t fast_smc_add_pc_write_retired(void)
339{
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100340#if ARM_ARCH_MAJOR < 8
341 INFO("%s skipped on ARMv7 and earlier\n", __func__);
342 return TEST_RESULT_SKIPPED;
343#else
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100344 struct pmu_event_info baseline, fast_smc_add;
345
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100346 SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(V8_2_DEBUG_ARCH_SUPPORTED);
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100347
348 SKIP_TEST_IF_TSP_NOT_PRESENT();
349
350 configure_pmu_cntr0(PMU_EV_PC_WRITE_RETIRED);
351 pmu_enable_counting();
352
353 tftf_testcase_printf("Getting baseline event count:\n");
354 measure_event(read_pmevcntr0_el0, profile_invalid_smc, &baseline);
355 tftf_testcase_printf("Profiling Fast Add SMC:\n");
356 measure_event(read_pmevcntr0_el0, profile_fast_smc_add, &fast_smc_add);
357
Petre-Ionut Tudor85926bc2019-10-09 10:56:39 +0100358 if (!results_within_allowed_margin(baseline.avg, fast_smc_add.avg))
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100359 return TEST_RESULT_FAIL;
360 return TEST_RESULT_SUCCESS;
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100361#endif
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100362}
363
364/*
365 * Measure the CPU cycles count of the fast add SMC.
366 * This test only succeeds if no useful information about the PMU counters has
367 * been leaked.
368 */
369test_result_t fast_smc_add_cycles(void)
370{
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100371#if ARM_ARCH_MAJOR < 8
372 INFO("%s skipped on ARMv7 and earlier\n", __func__);
373 return TEST_RESULT_SKIPPED;
374#else
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100375 struct pmu_event_info baseline, fast_smc_add;
376
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100377 SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(V8_2_DEBUG_ARCH_SUPPORTED);
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100378
379 SKIP_TEST_IF_TSP_NOT_PRESENT();
380
381 configure_pmu_cycle_cntr();
382 pmu_enable_counting();
383
384 tftf_testcase_printf("Getting baseline event count:\n");
385 measure_event(read_pmccntr_el0, profile_invalid_smc, &baseline);
386 tftf_testcase_printf("Profiling Fast Add SMC:\n");
387 measure_event(read_pmccntr_el0, profile_fast_smc_add, &fast_smc_add);
388
Petre-Ionut Tudor85926bc2019-10-09 10:56:39 +0100389 if (!results_within_allowed_margin(baseline.avg, fast_smc_add.avg))
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100390 return TEST_RESULT_FAIL;
391 return TEST_RESULT_SUCCESS;
Petre-Ionut Tudorf68ebdb2019-09-18 16:13:00 +0100392#endif
Petre-Ionut Tudorf1a45f72019-10-08 16:51:45 +0100393}