/*
 * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

#if ENABLE_MPMM
#	include <lib/mpmm/mpmm.h>
#endif

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

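/*
 * The helpers below wrap the raw system register accessors with
 * read-modify-write logic for individual register fields. As an illustration
 * of the pattern, a call such as write_amcr_el0_cg1rz(1U) behaves as:
 *
 *	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
 *		((1U << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
 *
 * i.e. only the targeted field changes; all other bits are preserved.
 */
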
static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	value &= ~SCR_AMVOFFEN_BIT;
	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(cpu_context_t *ctx)
{
	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/*
		 * Set SCR_EL3.AMVOFFEN to one so that accesses to the virtual
		 * offset registers at EL2 do not trap to EL3.
		 */
		ctx_write_scr_el3_amvoffen(ctx, 1U);
	}
}

void amu_enable_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Set CPTR_EL3.TAM to zero so that any accesses to the Activity Monitor
	 * registers do not trap to EL3.
	 */
	uint64_t cptr_el3 = per_world_ctx->ctx_cptr_el3;

	cptr_el3 &= ~TAM_BIT;
	per_world_ctx->ctx_cptr_el3 = cptr_el3;
}
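
/*
 * Illustrative note for amu_enable_per_world() above: per-world contexts are
 * shared by all cores executing in a given security state, so a caller
 * invokes this once per world rather than once per core. A hypothetical call
 * (per_world_context[] and CPU_CONTEXT_NS are assumed here, not defined in
 * this file):
 *
 *	amu_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
 */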

void amu_init_el3(void)
{
	uint64_t group0_impl_ctr = read_amcgcr_el0_cg0nc();
	uint64_t group0_en_mask = (1 << (group0_impl_ctr)) - 1U;
	uint64_t num_ctr_groups = read_amcfgr_el0_ncg();

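	/*
	 * Worked example with hypothetical values: if AMCGCR_EL0.CG0NC reads
	 * as 4, then group0_en_mask = (1 << 4) - 1 = 0xf, i.e. one enable bit
	 * for each of the four implemented group 0 counters.
	 */
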
	/* Enable all architected counters by default */
	write_amcntenset0_el0_px(group0_en_mask);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (num_ctr_groups > 0U) {
		uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */
		const struct amu_topology *topology;

		/*
		 * The platform may opt to enable specific auxiliary counters.
		 * This can be done via the common FCONF getter, or via the
		 * platform-implemented function.
		 */
#if ENABLE_AMU_FCONF
		topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
		topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

		if (topology != NULL) {
			unsigned int core_pos = plat_my_core_pos();

			amcntenset1_el0_px = topology->cores[core_pos].enable;
		} else {
			ERROR("AMU: failed to generate AMU topology\n");
		}

		write_amcntenset1_el0_px(amcntenset1_el0_px);
	}
#else /* ENABLE_AMU_AUXILIARY_COUNTERS */
	if (num_ctr_groups > 0U) {
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	if (is_feat_amuv1p1_supported()) {
#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
		 * flag; when set, system register reads at lower ELs return
		 * zero. Reads from the memory-mapped view are unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0_cg1rz(1U);
#else
		write_amcr_el0_cg1rz(0U);
#endif
	}
}

void amu_init_el2_unused(void)
{
	/*
	 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
	 * registers do not trap to EL2.
	 */
	write_cptr_el2_tam(0U);

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}
}

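/*
 * For reference, FEAT_AMUv1 architects four group 0 counters: processor
 * cycles (counter 0), constant-frequency cycles (counter 1), instructions
 * retired (counter 2), and memory stall cycles (counter 3). Group 1
 * counters, where implemented, are IMPLEMENTATION DEFINED auxiliary
 * counters.
 */
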
/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
		      "architected counter %" PRIu64 "!\n", idx);

		panic();
	}
}

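/*
 * Note on storage packing: because architected counter 1 has no virtual
 * offset register (the architecture provides no AMEVCNTVOFF register for the
 * constant-frequency cycle counter), group0_voffsets[] holds only three
 * entries. Counters {0, 2, 3} map to slots {0, 1, 2}; the save and restore
 * loops below skip unsupported counters while advancing their `j` index to
 * maintain this packing.
 */
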
/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

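/*
 * Context save/restore hooks. These are subscribed to the PSCI
 * suspend-to-powerdown events via SUBSCRIBE_TO_EVENT() at the bottom of this
 * file, so they run automatically on either side of a core power cycle;
 * nothing else needs to call them directly.
 */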
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0; /* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff; /* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg; /* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc; /* Number of group 1 counters */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters so that they stop counting before their
	 * values are sampled.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0; /* AMU virtual offsets enabled */

	uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcfgr_el0_ncg; /* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc; /* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff; /* Auxiliary counters with virtual offsets */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

#if ENABLE_MPMM
	mpmm_enable();
#endif

	return (void *)0;
}

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);