/*
 * Copyright (c) 2022, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <test_helpers.h>

/* These tests target AArch64. AArch32 is too different to even build. */
#if defined(__aarch64__)

#define PMU_EVT_INST_RETIRED	0x0008
#define NOP_REPETITIONS		50
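
/*
 * 0x0008 is the architected INST_RETIRED event (instructions architecturally
 * executed), so every pass of the NOP loop below should bump any counter
 * programmed with it. The repetition count just needs to be large enough to
 * show up in the reads.
 */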

static inline void enable_counting(void)
{
	write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
	/* calling this means we are about to use the PMU, so synchronize */
	isb();
}

static inline void disable_counting(void)
{
	write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_E_BIT);
	/* we also rely on the disable having actually taken effect */
	isb();
}

static inline void clear_counters(void)
{
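	/*
	 * PMCR_EL0.C zeroes the cycle counter and PMCR_EL0.P zeroes all the
	 * event counters
	 */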
	write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);
}

/*
 * tftf runs in EL2, so don't bother enabling counting at lower ELs or in the
 * Secure world. TF-A has separate controls for those and counting there does
 * not affect us.
 */
static inline void enable_cycle_counter(void)
{
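	/*
	 * the NSH filter bit is what allows cycle counting at EL2, where
	 * tftf runs
	 */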
	write_pmccfiltr_el0(PMCCFILTR_EL0_NSH_BIT);
	write_pmcntenset_el0(read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);
}

static inline void enable_event_counter(int ctr_num)
{
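	/*
	 * program counter <ctr_num> to count retired instructions, with NSH
	 * set so the event is counted at EL2, then turn the counter on
	 */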
	write_pmevtypern_el0(ctr_num, PMEVTYPER_EL0_NSH_BIT |
		(PMU_EVT_INST_RETIRED & PMEVTYPER_EL0_EVTCOUNT_BITS));
	write_pmcntenset_el0(read_pmcntenset_el0() |
		PMCNTENSET_EL0_P_BIT(ctr_num));
}

/* doesn't really matter what happens, as long as it happens a lot */
static inline void execute_nops(void)
{
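	/*
	 * every iteration retires one instruction, so both the cycle counter
	 * and an INST_RETIRED event counter are expected to move
	 */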
	for (int i = 0; i < NOP_REPETITIONS; i++) {
		/* tell the compiler x0 is clobbered by the dummy instruction */
		__asm__ volatile("orr x0, x0, x0" : : : "x0");
	}
}

#endif /* defined(__aarch64__) */

/*
 * try the cycle counter with some NOPs to see if it works
 */
test_result_t test_pmuv3_cycle_works_ns(void)
{
	SKIP_TEST_IF_AARCH32();
#if defined(__aarch64__)
	u_register_t ccounter_start;
	u_register_t ccounter_end;

	SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();

	enable_cycle_counter();
	enable_counting();

	ccounter_start = read_pmccntr_el0();
	execute_nops();
	ccounter_end = read_pmccntr_el0();
	disable_counting();
	clear_counters();

	tftf_testcase_printf("Counted from %lu to %lu\n",
		ccounter_start, ccounter_end);
	if (ccounter_start != ccounter_end) {
		return TEST_RESULT_SUCCESS;
	}
	return TEST_RESULT_FAIL;
#endif /* defined(__aarch64__) */
}

/*
 * try an event counter with some NOPs to see if it works. MDCR_EL2.HPMN can
 * make this tricky so take extra care.
 */
test_result_t test_pmuv3_event_works_ns(void)
{
	SKIP_TEST_IF_AARCH32();
#if defined(__aarch64__)
	u_register_t evcounter_start;
	u_register_t evcounter_end;
	u_register_t mdcr_el2 = ~0;

	SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();

	/*
	 * read the real value when in EL2; otherwise keep the all-ones dummy
	 * value so the HPMN/HPME checks below are skipped
	 */
	if (IS_IN_EL2()) {
		mdcr_el2 = read_mdcr_el2();
	}

	if (((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK) == 0) {
		tftf_testcase_printf("No event counters implemented\n");
		return TEST_RESULT_SKIPPED;
	}

	/* FEAT_HPMN0 only affects event counters */
	if ((mdcr_el2 & MDCR_EL2_HPMN_MASK) == 0) {
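		/*
		 * HPMN == 0 is only a valid setting when FEAT_HPMN0 is
		 * implemented; without it the value is reserved
		 */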
		if (!get_feat_hpmn0_supported()) {
			tftf_testcase_printf(
				"FEAT_HPMN0 not implemented but HPMN is 0\n");
			return TEST_RESULT_FAIL;
		}

		/*
		 * with HPMN == 0, counter 0 only counts when HPME is set, so
		 * the test will fail in this case
		 */
		if ((mdcr_el2 & MDCR_EL2_HPME_BIT) == 0) {
			tftf_testcase_printf(
				"HPMN is 0 and HPME is not set!\n");
		}
	}
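	/*
	 * counters below HPMN are enabled by PMCR_EL0.E while counters at or
	 * above it are enabled by MDCR_EL2.HPME, so counter 0 is the one most
	 * likely to be counting for us
	 */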
	enable_event_counter(0);
	enable_counting();

	/*
	 * if any are enabled it will be the very first one. HPME can disable
	 * the higher end of the counters and HPMN can put the boundary
	 * anywhere
	 */
	evcounter_start = read_pmevcntrn_el0(0);
	execute_nops();
	evcounter_end = read_pmevcntrn_el0(0);
	disable_counting();
	clear_counters();

	tftf_testcase_printf("Counted from %lu to %lu\n",
		evcounter_start, evcounter_end);
	if (evcounter_start != evcounter_end) {
		return TEST_RESULT_SUCCESS;
	}
	return TEST_RESULT_FAIL;
#endif /* defined(__aarch64__) */
}