/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/extensions/spe.h>

#include <plat/common/platform.h>

/*
 * SPE is an unusual feature. Its enable is split into two:
 * - (NSPBE, NSPB[0]) - the security state bits - determine which security
 *   state owns the profiling buffer.
 * - NSPB[1] - the enable bit - determines if the security state that owns the
 *   buffer may access SPE registers.
 *
 * There is a secondary ID register, PMBIDR_EL1, that is more granular than
 * ID_AA64DFR0_EL1. When a security state owns the buffer, PMBIDR_EL1.P will
 * report that SPE programming is allowed. This means that the usual assumption
 * that leaving all bits at a default of zero will disable the feature may not
 * work correctly. To correctly disable SPE, the current security state must NOT
 * own the buffer, irrespective of the enable bit. Then, to play nicely with
 * SMCCC_ARCH_FEATURE_AVAILABILITY, the enable bit should correspond to the
 * enable status. The feature is architected this way to allow for lazy context
 * switching of the buffer - a world can be made owner of the buffer (with
 * PMBIDR_EL1.P reporting full access) without giving it access to the registers
 * (by trapping to EL3). Context switching can then be deferred until a world
 * tries to use SPE, at which point access can be given and the trapping
 * instruction repeated.
 *
 * This can be simplified to the following rules:
 * 1. To enable SPE for world X:
 *    * world X owns the buffer ((NSPBE, NSPB[0]) == SCR_EL3.{NSE, NS})
 *    * trapping disabled (NSPB[1] == 1)
 * 2. To disable SPE for world X:
 *    * world X does not own the buffer ((NSPBE, NSPB[0]) != SCR_EL3.{NSE, NS})
 *    * trapping enabled (NSPB[1] == 0)
 */
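
/*
 * Illustrative sketch only, not part of this file's API: how rule 1 above maps
 * onto the MDCR_EL3 bit macros used in this file, taking the Non-secure world
 * (SCR_EL3.{NSE, NS} == {0, 1}) as the example. The helper names are
 * hypothetical and exist purely to spell out the rules in code form.
 */
static inline bool spe_ns_owns_buffer(u_register_t mdcr_el3_val)
{
	/* NS owns the buffer when NSPBE == 0 and NSPB[0] (the SS bit) == 1. */
	return ((mdcr_el3_val & MDCR_NSPBE_BIT) == 0U) &&
	       ((mdcr_el3_val & MDCR_NSPB_SS_BIT) != 0U);
}

static inline bool spe_ns_fully_enabled(u_register_t mdcr_el3_val)
{
	/* Rule 1: NS owns the buffer and trapping is disabled (NSPB[1] == 1). */
	return spe_ns_owns_buffer(mdcr_el3_val) &&
	       ((mdcr_el3_val & MDCR_NSPB_EN_BIT) != 0U);
}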

/*
 * MDCR_EL3.EnPMSN (ARM v8.7) and MDCR_EL3.EnPMS3: Do not trap accesses to the
 * PMSNEVFR_EL1 or PMSDSFR_EL1 registers at NS-EL1 or NS-EL2 to EL3 if
 * FEAT_SPEv1p2 or FEAT_SPE_FDS are implemented. Setting these bits to 1 has no
 * effect when the features aren't implemented.
 */
void spe_enable_ns(cpu_context_t *ctx)
{
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t mdcr_el3_val = read_ctx_reg(state, CTX_MDCR_EL3);

	mdcr_el3_val |= MDCR_NSPB_EN_BIT | MDCR_NSPB_SS_BIT | MDCR_EnPMSN_BIT | MDCR_EnPMS3_BIT;
	mdcr_el3_val &= ~(MDCR_NSPBE_BIT);

	write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3_val);
}
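
/*
 * A minimal usage sketch, assuming the caller is a context-management routine
 * that owns a Non-secure cpu_context_t. The wrapper name is hypothetical and
 * the is_feat_spe_supported() guard is assumed to be the usual feature check
 * from arch_features.h; neither is part of this file's API.
 */
static inline void spe_setup_ns_sketch(cpu_context_t *ctx)
{
	/* Only touch the SPE controls when FEAT_SPE is actually implemented. */
	if (is_feat_spe_supported()) {
		spe_enable_ns(ctx);
	}
}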

/*
 * MDCR_EL3.EnPMSN (ARM v8.7) and MDCR_EL3.EnPMS3: Clear the bits to trap
 * accesses to PMSNEVFR_EL1 and PMSDSFR_EL1 from EL2/EL1 to EL3.
 */
static void spe_disable_others(cpu_context_t *ctx)
{
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t mdcr_el3_val = read_ctx_reg(state, CTX_MDCR_EL3);

	mdcr_el3_val |= MDCR_NSPB_SS_BIT;
	mdcr_el3_val &= ~(MDCR_NSPB_EN_BIT | MDCR_NSPBE_BIT | MDCR_EnPMSN_BIT |
			  MDCR_EnPMS3_BIT);
	write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3_val);
}

void spe_disable_secure(cpu_context_t *ctx)
{
	spe_disable_others(ctx);
}

void spe_disable_realm(cpu_context_t *ctx)
{
	spe_disable_others(ctx);
}
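
/*
 * A hedged sketch of how the per-world entry points above might be dispatched
 * from a context-management caller. The function name and the use of the
 * SECURE/NON_SECURE/REALM security state identifiers are assumptions for
 * illustration only, not code from this file.
 */
static inline void spe_configure_for_world_sketch(cpu_context_t *ctx,
						  unsigned int security_state)
{
	switch (security_state) {
	case NON_SECURE:
		/* NS owns the buffer and may program SPE (rule 1 above). */
		spe_enable_ns(ctx);
		break;
	case SECURE:
		/* Secure world neither owns the buffer nor may program SPE. */
		spe_disable_secure(ctx);
		break;
	case REALM:
		/* Realm world neither owns the buffer nor may program SPE. */
		spe_disable_realm(ctx);
		break;
	default:
		break;
	}
}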

void spe_init_el2_unused(void)
{
	uint64_t v;

	/*
	 * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
	 * profiling controls to EL2.
	 *
	 * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
	 * state. Accesses to profiling buffer controls at
	 * Non-secure EL1 are not trapped to EL2.
	 */
	v = read_mdcr_el2();
	v &= ~MDCR_EL2_TPMS;
	v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
	write_mdcr_el2(v);
}