/*
 * Copyright (c) 2022, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <arm_arch_svc.h>
#include <test_helpers.h>

/* tests target aarch64. Aarch32 is too different to even build */
#if defined(__aarch64__)

#define PMU_EVT_INST_RETIRED	0x0008
#define NOP_REPETITIONS		50
#define MAX_COUNTERS		32

18static inline void read_all_counters(u_register_t *array, int impl_ev_ctrs)
19{
20 array[0] = read_pmccntr_el0();
21 for (int i = 0; i < impl_ev_ctrs; i++) {
22 array[i + 1] = read_pmevcntrn_el0(i);
23 }
24}
25
26static inline void read_all_counter_configs(u_register_t *array, int impl_ev_ctrs)
27{
28 array[0] = read_pmccfiltr_el0();
29 for (int i = 0; i < impl_ev_ctrs; i++) {
30 array[i + 1] = read_pmevtypern_el0(i);
31 }
32}
33
34static inline void read_all_pmu_configs(u_register_t *array)
35{
36 array[0] = read_pmcntenset_el0();
37 array[1] = read_pmcr_el0();
38 array[2] = read_pmselr_el0();
39 array[3] = (IS_IN_EL2()) ? read_mdcr_el2() : 0;
40}
Boyan Karatotev35e3ca02022-10-10 16:39:45 +010041
42static inline void enable_counting(void)
43{
44 write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
45 /* this function means we are about to use the PMU, synchronize */
46 isb();
47}
48
49static inline void disable_counting(void)
50{
51 write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_E_BIT);
52 /* we also rely that disabling really did work */
53 isb();
54}
55
56static inline void clear_counters(void)
57{
58 write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);
59}
60
61/*
62 * tftf runs in EL2, don't bother enabling counting at lower ELs and secure
63 * world. TF-A has other controls for them and counting there doesn't impact us
64 */
65static inline void enable_cycle_counter(void)
66{
67 write_pmccfiltr_el0(PMCCFILTR_EL0_NSH_BIT);
68 write_pmcntenset_el0(read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);
69}
70
71static inline void enable_event_counter(int ctr_num)
72{
73 write_pmevtypern_el0(ctr_num, PMEVTYPER_EL0_NSH_BIT |
74 (PMU_EVT_INST_RETIRED & PMEVTYPER_EL0_EVTCOUNT_BITS));
75 write_pmcntenset_el0(read_pmcntenset_el0() |
76 PMCNTENSET_EL0_P_BIT(ctr_num));
77}
78
79/* doesn't really matter what happens, as long as it happens a lot */
80static inline void execute_nops(void)
81{
82 for (int i = 0; i < NOP_REPETITIONS; i++) {
83 __asm__ ("orr x0, x0, x0\n");
84 }
85}
86
Boyan Karatotev8585eef2022-10-11 17:45:28 +010087static inline void execute_el3_nop(void)
88{
89 /* ask EL3 for some info, no side effects */
90 smc_args args = { SMCCC_VERSION };
91
92 /* return values don't matter */
93 tftf_smc(&args);
94}
95
#endif /* defined(__aarch64__) */

98/*
99 * try the cycle counter with some NOPs to see if it works
100 */
101test_result_t test_pmuv3_cycle_works_ns(void)
102{
103 SKIP_TEST_IF_AARCH32();
104#if defined(__aarch64__)
105 u_register_t ccounter_start;
106 u_register_t ccounter_end;
107
108 SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
109
110 enable_cycle_counter();
111 enable_counting();
112
113 ccounter_start = read_pmccntr_el0();
114 execute_nops();
115 ccounter_end = read_pmccntr_el0();
116 disable_counting();
117 clear_counters();
118
119 tftf_testcase_printf("Counted from %ld to %ld\n",
120 ccounter_start, ccounter_end);
121 if (ccounter_start != ccounter_end) {
122 return TEST_RESULT_SUCCESS;
123 }
124 return TEST_RESULT_FAIL;
125#endif /* defined(__aarch64__) */
126}
128/*
129 * try an event counter with some NOPs to see if it works. MDCR_EL2.HPMN can
130 * make this tricky so take extra care.
131 */
132test_result_t test_pmuv3_event_works_ns(void)
133{
134 SKIP_TEST_IF_AARCH32();
135#if defined(__aarch64__)
136 u_register_t evcounter_start;
137 u_register_t evcounter_end;
138 u_register_t mdcr_el2 = ~0;
139
140 SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
141
142 /* use the real value or use the dummy value to skip checks later */
143 if (IS_IN_EL2()) {
144 mdcr_el2 = read_mdcr_el2();
145 }
146
147 if (((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK) == 0) {
148 tftf_testcase_printf("No event counters implemented\n");
149 return TEST_RESULT_SKIPPED;
150 }
151
152 /* FEAT_HPMN0 only affects event counters */
153 if ((mdcr_el2 & MDCR_EL2_HPMN_MASK) == 0) {
154 if (!get_feat_hpmn0_supported()) {
155 tftf_testcase_printf(
156 "FEAT_HPMN0 not implemented but HPMN is 0\n");
157 return TEST_RESULT_FAIL;
158 }
159
160 /* the test will fail in this case */
161 if ((mdcr_el2 & MDCR_EL2_HPME_BIT) == 0) {
162 tftf_testcase_printf(
163 "HPMN is 0 and HPME is not set!\n");
164 }
165 }
166
167 enable_event_counter(0);
168 enable_counting();
169
170 /*
171 * if any are enabled it will be the very first one. HPME can disable
172 * the higher end of the counters and HPMN can put the boundary
173 * anywhere
174 */
175 evcounter_start = read_pmevcntrn_el0(0);
176 execute_nops();
177 evcounter_end = read_pmevcntrn_el0(0);
178 disable_counting();
179 clear_counters();
180
181 tftf_testcase_printf("Counted from %ld to %ld\n",
182 evcounter_start, evcounter_end);
183 if (evcounter_start != evcounter_end) {
184 return TEST_RESULT_SUCCESS;
185 }
186 return TEST_RESULT_FAIL;
187#endif /* defined(__aarch64__) */
188}


191/*
192 * check if entering/exiting EL3 (with a NOP) preserves all PMU registers.
193 */
194test_result_t test_pmuv3_el3_preserves(void)
195{
196 SKIP_TEST_IF_AARCH32();
197#if defined(__aarch64__)
198 u_register_t ctr_start[MAX_COUNTERS] = {0};
199 u_register_t ctr_cfg_start[MAX_COUNTERS] = {0};
200 u_register_t pmu_cfg_start[4];
201 u_register_t ctr_end[MAX_COUNTERS] = {0};
202 u_register_t ctr_cfg_end[MAX_COUNTERS] = {0};
203 u_register_t pmu_cfg_end[4];
204 int impl_ev_ctrs = (read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK;
205
206 SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
207
208 /* start from 0 so we know we can't overflow */
209 clear_counters();
210 /* pretend counters have just been used */
211 enable_cycle_counter();
212 enable_event_counter(0);
213 enable_counting();
214 execute_nops();
215 disable_counting();
216
217 /* get before reading */
218 read_all_counters(ctr_start, impl_ev_ctrs);
219 read_all_counter_configs(ctr_cfg_start, impl_ev_ctrs);
220 read_all_pmu_configs(pmu_cfg_start);
221
222 /* give EL3 a chance to scramble everything */
223 execute_el3_nop();
224
225 /* get after reading */
226 read_all_counters(ctr_end, impl_ev_ctrs);
227 read_all_counter_configs(ctr_cfg_end, impl_ev_ctrs);
228 read_all_pmu_configs(pmu_cfg_end);
229
230 if (memcmp(ctr_start, ctr_end, sizeof(ctr_start)) != 0) {
231 tftf_testcase_printf("SMC call did not preserve counters\n");
232 return TEST_RESULT_FAIL;
233 }
234
235 if (memcmp(ctr_cfg_start, ctr_cfg_end, sizeof(ctr_cfg_start)) != 0) {
236 tftf_testcase_printf("SMC call did not preserve counter config\n");
237 return TEST_RESULT_FAIL;
238 }
239
240 if (memcmp(pmu_cfg_start, pmu_cfg_end, sizeof(pmu_cfg_start)) != 0) {
241 tftf_testcase_printf("SMC call did not preserve PMU registers\n");
242 return TEST_RESULT_FAIL;
243 }
244
245 return TEST_RESULT_SUCCESS;
246#endif /* defined(__aarch64__) */
247}