/*
 * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <string.h>	/* memcmp() */

#include <arch_helpers.h>
#include <arm_arch_svc.h>
#include <debug.h>
#include <drivers/arm/gic_v3.h>

#include <host_realm_pmu.h>
#include <realm_rsi.h>

/* PMUv3 events */
#define PMU_EVT_SW_INCR		0x0
#define PMU_EVT_INST_RETIRED	0x8
#define PMU_EVT_CPU_CYCLES	0x11
#define PMU_EVT_MEM_ACCESS	0x13

#define NOP_REPETITIONS		50

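/* Counter start value a few increments below the overflow boundary */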
#define PRE_OVERFLOW		~(0xF)

#define DELAY_MS		3000ULL

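/* Read the cycle counter and all implemented event counters into 'array' */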
static inline void read_all_counters(u_register_t *array, unsigned int impl_ev_ctrs)
{
	array[0] = read_pmccntr_el0();
	for (unsigned int i = 0U; i < impl_ev_ctrs; i++) {
		array[i + 1] = read_pmevcntrn_el0(i);
	}
}

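/* Read PMCCFILTR_EL0 and all implemented PMEVTYPER<n>_EL0 registers into 'array' */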
static inline void read_all_counter_configs(u_register_t *array, unsigned int impl_ev_ctrs)
{
	array[0] = read_pmccfiltr_el0();
	for (unsigned int i = 0U; i < impl_ev_ctrs; i++) {
		array[i + 1] = read_pmevtypern_el0(i);
	}
}

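/* Read the global PMU configuration: PMCNTENSET_EL0, PMCR_EL0 and PMSELR_EL0 */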
static inline void read_all_pmu_configs(u_register_t *array)
{
	array[0] = read_pmcntenset_el0();
	array[1] = read_pmcr_el0();
	array[2] = read_pmselr_el0();
}

static inline void enable_counting(void)
{
	write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
	/* We are about to use the PMU, so synchronize the context */
	isb();
}

static inline void disable_counting(void)
{
	write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_E_BIT);
	/* Make sure the disable has really taken effect before we rely on it */
	isb();
}

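/* Reset the cycle counter and all event counters to zero */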
static inline void clear_counters(void)
{
	write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);
	isb();
}

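/* Put the PMU into a known state: counters reset and disabled, overflow status and interrupts cleared */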
static void pmu_reset(void)
{
	/* Reset all counters */
	write_pmcr_el0(read_pmcr_el0() |
		       PMCR_EL0_DP_BIT | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);

	/* Disable all counters */
	write_pmcntenclr_el0(PMU_CLEAR_ALL);

	/* Clear overflow status */
	write_pmovsclr_el0(PMU_CLEAR_ALL);

	/* Disable overflow interrupts on all counters */
	write_pmintenclr_el1(PMU_CLEAR_ALL);
	isb();
}

/*
 * This test runs in Realm EL1, so don't bother enabling counting at lower ELs
 * or in the Secure world. TF-A has other controls for them and counting there
 * doesn't impact us.
 */
static inline void enable_cycle_counter(void)
{
	/*
	 * Set PMCCFILTR_EL0.U != PMCCFILTR_EL0.RLU
	 * to disable counting in Realm EL0.
	 * Set PMCCFILTR_EL0.P = PMCCFILTR_EL0.RLK
	 * to enable counting in Realm EL1.
	 * Set PMCCFILTR_EL0.NSH = PMCCFILTR_EL0.RLH
	 * to disable counting in Realm EL2.
	 */
	write_pmccfiltr_el0(PMCCFILTR_EL0_U_BIT |
		PMCCFILTR_EL0_P_BIT | PMCCFILTR_EL0_RLK_BIT |
		PMCCFILTR_EL0_NSH_BIT | PMCCFILTR_EL0_RLH_BIT);
	write_pmcntenset_el0(read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);
	isb();
}

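/*
 * Program event counter <ctr_num> to count retired instructions
 * (PMU_EVT_INST_RETIRED) at Realm EL1 and enable it.
 */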
static inline void enable_event_counter(int ctr_num)
{
	/*
	 * Set PMEVTYPER_EL0.U != PMEVTYPER_EL0.RLU
	 * to disable event counting in Realm EL0.
	 * Set PMEVTYPER_EL0.P = PMEVTYPER_EL0.RLK
	 * to enable counting in Realm EL1.
	 * Set PMEVTYPER_EL0.NSH = PMEVTYPER_EL0.RLH
	 * to disable event counting in Realm EL2.
	 */
	write_pmevtypern_el0(ctr_num,
		PMEVTYPER_EL0_U_BIT |
		PMEVTYPER_EL0_P_BIT | PMEVTYPER_EL0_RLK_BIT |
		PMEVTYPER_EL0_NSH_BIT | PMEVTYPER_EL0_RLH_BIT |
		(PMU_EVT_INST_RETIRED & PMEVTYPER_EL0_EVTCOUNT_BITS));
	write_pmcntenset_el0(read_pmcntenset_el0() |
		PMCNTENSET_EL0_P_BIT(ctr_num));
	isb();
}

/* Doesn't really matter what happens, as long as it happens a lot */
static inline void execute_nops(void)
{
	for (unsigned int i = 0U; i < NOP_REPETITIONS; i++) {
		__asm__ volatile("orr x0, x0, x0" : : : "x0");
	}
}

/*
 * Try the cycle counter with some NOPs to see if it works
 */
bool test_pmuv3_cycle_works_realm(void)
{
	u_register_t ccounter_start;
	u_register_t ccounter_end;

	pmu_reset();

	enable_cycle_counter();
	enable_counting();

	ccounter_start = read_pmccntr_el0();
	execute_nops();
	ccounter_end = read_pmccntr_el0();
	disable_counting();
	clear_counters();

	realm_printf("counted from %lu to %lu\n",
		     ccounter_start, ccounter_end);
	if (ccounter_start != ccounter_end) {
		return true;
	}
	return false;
}

/* Check that the number of counters available matches the value programmed by the host */
bool test_pmuv3_counter(void)
{
	uint64_t num_cnts, num_cnts_host;

	num_cnts_host = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
	num_cnts = GET_PMU_CNT;
	realm_printf("CPU=%lu num_cnts=%lu num_cnts_host=%lu\n",
		     read_mpidr_el1() & MPID_MASK, num_cnts, num_cnts_host);
	if (num_cnts == num_cnts_host) {
		return true;
	}
	return false;
}

/*
 * Try an event counter with some NOPs to see if it works.
 */
bool test_pmuv3_event_works_realm(void)
{
	u_register_t evcounter_start;
	u_register_t evcounter_end;

	if (GET_PMU_CNT == 0) {
		realm_printf("no event counters implemented\n");
		return false;
	}

	pmu_reset();

	enable_event_counter(0);
	enable_counting();

	/*
	 * Event counter #0 is the only one enabled above, so sample it.
	 */
	evcounter_start = read_pmevcntrn_el0(0);
	execute_nops();
	disable_counting();
	evcounter_end = read_pmevcntrn_el0(0);
	clear_counters();

	realm_printf("counted from %lu to %lu\n",
		     evcounter_start, evcounter_end);
	if (evcounter_start != evcounter_end) {
		return true;
	}
	return false;
}

/*
 * Check that entering/exiting RMM (with a NOP RSI call) preserves all PMU registers.
 */
bool test_pmuv3_rmm_preserves(void)
{
	u_register_t ctr_start[MAX_COUNTERS] = {0};
	u_register_t ctr_cfg_start[MAX_COUNTERS] = {0};
	u_register_t pmu_cfg_start[3];
	u_register_t ctr_end[MAX_COUNTERS] = {0};
	u_register_t ctr_cfg_end[MAX_COUNTERS] = {0};
	u_register_t pmu_cfg_end[3];
	unsigned int impl_ev_ctrs = (unsigned int)GET_PMU_CNT;

	realm_printf("testing %u event counters\n", impl_ev_ctrs);

	pmu_reset();

	/* Pretend counters have just been used */
	enable_cycle_counter();
	enable_event_counter(0);
	enable_counting();
	execute_nops();
	disable_counting();

	/* Capture the PMU state before the RSI call */
	read_all_counters(ctr_start, impl_ev_ctrs);
	read_all_counter_configs(ctr_cfg_start, impl_ev_ctrs);
	read_all_pmu_configs(pmu_cfg_start);

	/* Give RMM a chance to scramble everything */
	(void)rsi_get_version(RSI_ABI_VERSION_VAL);

	/* Capture the PMU state after the RSI call */
	read_all_counters(ctr_end, impl_ev_ctrs);
	read_all_counter_configs(ctr_cfg_end, impl_ev_ctrs);
	read_all_pmu_configs(pmu_cfg_end);

	if (memcmp(ctr_start, ctr_end, sizeof(ctr_start)) != 0) {
		realm_printf("SMC call did not preserve %s\n",
			     "counters");
		return false;
	}

	if (memcmp(ctr_cfg_start, ctr_cfg_end, sizeof(ctr_cfg_start)) != 0) {
		realm_printf("SMC call did not preserve %s\n",
			     "counter config");
		return false;
	}

	if (memcmp(pmu_cfg_start, pmu_cfg_end, sizeof(pmu_cfg_start)) != 0) {
		realm_printf("SMC call did not preserve %s\n",
			     "PMU registers");
		return false;
	}

	return true;
}

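/*
 * Program event counter #0 close to its overflow value and check that the
 * resulting PMU overflow interrupt is delivered to the Realm as a vIRQ.
 */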
bool test_pmuv3_overflow_interrupt(void)
{
	unsigned long priority_bits, priority;
	uint64_t delay_time = DELAY_MS;

	pmu_reset();

	/* Get the number of priority bits implemented */
	priority_bits = ((read_icv_ctrl_el1() >> ICV_CTLR_EL1_PRIbits_SHIFT) &
			ICV_CTLR_EL1_PRIbits_MASK) + 1UL;

	/* Unimplemented bits are RES0 and start from LSB */
	priority = (0xFFUL << (8UL - priority_bits)) & 0xFFUL;

	/* Set the priority mask register to allow all interrupts */
	write_icv_pmr_el1(priority);

	/* Enable Virtual Group 1 interrupts */
	write_icv_igrpen1_el1(ICV_IGRPEN1_EL1_Enable);

	/* Enable IRQ */
	enable_irq();

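	/* Start event counter #0 a few increments below its overflow value */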
	write_pmevcntrn_el0(0, PRE_OVERFLOW);
	enable_event_counter(0);

	/* Enable interrupt on event counter #0 */
	write_pmintenset_el1((1UL << 0));

	realm_printf("waiting for PMU vIRQ...\n");

	enable_counting();
	execute_nops();

	/*
	 * Interrupt handler will clear
	 * Performance Monitors Interrupt Enable Set register
	 * as part of handling the overflow interrupt.
	 */
	while ((read_pmintenset_el1() != 0UL) && (delay_time != 0ULL)) {
		--delay_time;
	}

	/* Disable IRQ */
	disable_irq();

	pmu_reset();

	if (delay_time == 0ULL) {
		realm_printf("PMU vIRQ %sreceived in %llums\n", "not ",
			     DELAY_MS);
		return false;
	}

	realm_printf("PMU vIRQ %sreceived in %llums\n", "",
		     DELAY_MS - delay_time);

	return true;
}