/*
 * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Dispatch synchronous system register traps from lower ELs.
 */

#include <arch_features.h>
#include <arch_helpers.h>
#include <bl31/sync_handle.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>

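/*
 * Demultiplex a synchronous system register trap (exception class 0x18)
 * taken to EL3 and forward it to whichever platform handler was enabled
 * at build time. Returns TRAP_RET_UNHANDLED when no handler claims the
 * access.
 */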
int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx)
{
        uint64_t __unused opcode = esr_el3 & ISS_SYSREG_OPCODE_MASK;

#if ENABLE_FEAT_RNG_TRAP
        if ((opcode == ISS_SYSREG_OPCODE_RNDR) || (opcode == ISS_SYSREG_OPCODE_RNDRRS)) {
                return plat_handle_rng_trap(esr_el3, ctx);
        }
#endif

#if IMPDEF_SYSREG_TRAP
        if ((opcode & ISS_SYSREG_OPCODE_IMPDEF) == ISS_SYSREG_OPCODE_IMPDEF) {
                return plat_handle_impdef_trap(esr_el3, ctx);
        }
#endif

        return TRAP_RET_UNHANDLED;
}

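/*
 * When HCR_EL2.TGE is set, exceptions that would otherwise be taken to
 * EL1 are taken to EL2, which is why it factors into the choice of
 * injection target below.
 */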
static bool is_tge_enabled(void)
{
        u_register_t hcr_el2 = read_hcr_el2();

        return (is_feat_vhe_present() && ((hcr_el2 & HCR_TGE_BIT) != 0U));
}

/*
 * Ensure that an Undefined exception is never injected into a
 * non-existent S-EL2. This could otherwise happen when the trap comes
 * from S-EL{1,0} while the non-secure world runs with the TGE bit set:
 * EL3 does not save/restore EL2 registers when only one world has EL2
 * enabled, so reading hcr_el2.TGE would return the NS world's value.
 */
static bool is_secure_trap_without_sel2(u_register_t scr)
{
        return ((scr & (SCR_NS_BIT | SCR_EEL2_BIT)) == 0);
}

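/*
 * Select the EL that the injected exception is delivered to: a trap
 * from EL2 is reflected back to EL2, traps from EL{1,0} are routed to
 * EL2 when TGE is in force (and S-EL2 exists), and to EL1 otherwise.
 */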
static unsigned int target_el(unsigned int from_el, u_register_t scr)
{
        if (from_el > MODE_EL1) {
                return from_el;
        } else if (is_tge_enabled() && !is_secure_trap_without_sel2(scr)) {
                return MODE_EL2;
        } else {
                return MODE_EL1;
        }
}

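/*
 * Compute the address at which the lower EL resumes execution: the
 * base of its vector table (VBAR_ELx) plus the offset of the vector
 * that applies, i.e. current EL with SP_EL0, current EL with SP_ELx,
 * or lower EL using AArch64.
 */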
static u_register_t get_elr_el3(u_register_t spsr_el3, u_register_t vbar, unsigned int target_el)
{
        unsigned int outgoing_el = GET_EL(spsr_el3);
        u_register_t elr_el3 = 0;

        if (outgoing_el == target_el) {
                /*
                 * Target EL is either EL1 or EL2; the LSB of SPSR.M tells
                 * us the SPSel:
                 * Thread mode  : 0
                 * Handler mode : 1
                 */
                if ((spsr_el3 & (MODE_SP_MASK << MODE_SP_SHIFT)) == MODE_SP_ELX) {
                        elr_el3 = vbar + CURRENT_EL_SPX;
                } else {
                        elr_el3 = vbar + CURRENT_EL_SP0;
                }
        } else {
                /* Vector address for a lower EL using AArch64 */
                elr_el3 = vbar + LOWER_EL_AARCH64;
        }

        return elr_el3;
}

/*
 * Explicitly create all bits of SPSR to get PSTATE at exception return.
 *
 * The code is based on "Aarch64.exceptions.takeexception" described in
 * DDI0602 revision 2025-03.
 * "https://developer.arm.com/documentation/ddi0597/2025-03/Shared-Pseudocode/
 * aarch64-exceptions-takeexception"
 *
 * NOTE: This piece of code must be reviewed every release against the latest
 * takeexception sequence to ensure that we keep up with new arch features that
 * affect the PSTATE.
 *
 * TF-A 2.13 release review
 *
 * Review of version 2025-03 indicates we are missing support for one feature:
 * - FEAT_UINJ (2024 extension)
 */
u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
{
        u_register_t new_spsr = 0;
        u_register_t sctlr;

        /* Set M bits for target EL in AArch64 mode, also get sctlr */
        if (target_el == MODE_EL2) {
                sctlr = read_sctlr_el2();
                new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL2H;
        } else {
                sctlr = read_sctlr_el1();
                new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL1H;
        }

        /* Mask all exceptions, update DAIF bits */
        new_spsr |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;

        /* If FEAT_BTI is present, clear BTYPE bits */
        new_spsr |= old_spsr & (SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
        if (is_feat_bti_present()) {
                new_spsr &= ~(SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
        }

        /* If SSBS is implemented, take the value from SCTLR.DSSBS */
        new_spsr |= old_spsr & SPSR_SSBS_BIT_AARCH64;
        if (is_feat_ssbs_present()) {
                if ((sctlr & SCTLR_DSSBS_BIT) != 0U) {
                        new_spsr |= SPSR_SSBS_BIT_AARCH64;
                } else {
                        new_spsr &= ~SPSR_SSBS_BIT_AARCH64;
                }
        }

        /* If FEAT_NMI is implemented, ALLINT = !(SCTLR.SPINTMASK) */
        new_spsr |= old_spsr & SPSR_ALLINT_BIT_AARCH64;
        if (is_feat_nmi_present()) {
                if ((sctlr & SCTLR_SPINTMASK_BIT) != 0U) {
                        new_spsr &= ~SPSR_ALLINT_BIT_AARCH64;
                } else {
                        new_spsr |= SPSR_ALLINT_BIT_AARCH64;
                }
        }

        /* Clear PSTATE.IL bit explicitly */
        new_spsr &= ~SPSR_IL_BIT;

        /* Clear PSTATE.SS bit explicitly */
        new_spsr &= ~SPSR_SS_BIT;

        /* Update PSTATE.PAN bit */
        new_spsr |= old_spsr & SPSR_PAN_BIT;
        if (is_feat_pan_present() &&
            ((target_el == MODE_EL1) || ((target_el == MODE_EL2) && is_tge_enabled())) &&
            ((sctlr & SCTLR_SPAN_BIT) == 0U)) {
                new_spsr |= SPSR_PAN_BIT;
        }

        /* Clear UAO bit if FEAT_UAO is present */
        new_spsr |= old_spsr & SPSR_UAO_BIT_AARCH64;
        if (is_feat_uao_present()) {
                new_spsr &= ~SPSR_UAO_BIT_AARCH64;
        }

        /* DIT bits are unchanged */
        new_spsr |= old_spsr & SPSR_DIT_BIT;

        /* If FEAT_MTE2 is implemented, mask tag faults by setting the TCO bit */
        new_spsr |= old_spsr & SPSR_TCO_BIT_AARCH64;
        if (is_feat_mte2_present()) {
                new_spsr |= SPSR_TCO_BIT_AARCH64;
        }

        /* NZCV bits are unchanged */
        new_spsr |= old_spsr & SPSR_NZCV;

        /* If FEAT_EBEP is present, set the PM bit */
        new_spsr |= old_spsr & SPSR_PM_BIT_AARCH64;
        if (is_feat_ebep_present()) {
                new_spsr |= SPSR_PM_BIT_AARCH64;
        }

        /* If FEAT_SEBEP is present, clear the PPEND bit */
        new_spsr |= old_spsr & SPSR_PPEND_BIT;
        if (is_feat_sebep_present()) {
                new_spsr &= ~SPSR_PPEND_BIT;
        }

        /* If FEAT_GCS is present, update the EXLOCK bit */
        new_spsr |= old_spsr & SPSR_EXLOCK_BIT_AARCH64;
        if (is_feat_gcs_present()) {
                u_register_t gcscr;

                if (target_el == MODE_EL2) {
                        gcscr = read_gcscr_el2();
                } else {
                        gcscr = read_gcscr_el1();
                }
                new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
        }

        /* If FEAT_PAUTH_LR is present, clear the PACM bit */
        new_spsr |= old_spsr & SPSR_PACM_BIT_AARCH64;
        if (is_feat_pauth_lr_present()) {
                new_spsr &= ~SPSR_PACM_BIT_AARCH64;
        }

        return new_spsr;
}

/*
 * Handler for injecting an Undefined Instruction exception into a lower
 * EL, used when a lower EL accesses system registers that (an older)
 * EL3 firmware is unaware of.
 *
 * This is a safety net to avoid EL3 panics caused by system register
 * accesses that trigger an exception with syndrome EC=0x18.
 */
void inject_undef64(cpu_context_t *ctx)
{
        u_register_t esr = (EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL_BIT;
        el3_state_t *state = get_el3state_ctx(ctx);
        u_register_t elr_el3 = read_ctx_reg(state, CTX_ELR_EL3);
        u_register_t old_spsr = read_ctx_reg(state, CTX_SPSR_EL3);
        u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
        u_register_t new_spsr = 0;
        unsigned int to_el = target_el(GET_EL(old_spsr), scr_el3);

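        /*
         * Mirror what hardware exception entry to the target EL would do:
         * its ELR, ESR and SPSR record the preferred return address, the
         * "Unknown reason" syndrome and the old PSTATE, while EL3's return
         * state is redirected to the target EL's vector table.
         */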
        if (to_el == MODE_EL2) {
                write_elr_el2(elr_el3);
                elr_el3 = get_elr_el3(old_spsr, read_vbar_el2(), to_el);
                write_esr_el2(esr);
                write_spsr_el2(old_spsr);
        } else {
                write_elr_el1(elr_el3);
                elr_el3 = get_elr_el3(old_spsr, read_vbar_el1(), to_el);
                write_esr_el1(esr);
                write_spsr_el1(old_spsr);
        }

        new_spsr = create_spsr(old_spsr, to_el);

        write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
        write_ctx_reg(state, CTX_ELR_EL3, elr_el3);
}