/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <el3_common_macros.S>

#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

#if ERRATA_SPECULATIVE_AT
	.global	save_and_update_ptw_el1_sys_regs
#endif /* ERRATA_SPECULATIVE_AT */

	.global	prepare_el3_entry
	.global	restore_gp_pmcr_pauth_regs
	.global	el3_exit

/* ------------------------------------------------------------------
 * The following function strictly follows the AAPCS64 calling
 * convention, using only x9-x17 (temporary, caller-saved registers)
 * to save the floating point register context. It assumes that 'x0'
 * points to a 'fp_regs' structure where the register context will
 * be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, and assumes the bit is cleared.
 *
 * TODO: Revisit when VFP is used in secure world
 * ------------------------------------------------------------------
 */
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, fpexc32_el2
	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */
	ret
endfunc fpregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AAPCS64 calling
 * convention, using only x9-x17 (temporary, caller-saved registers)
 * to restore the floating point register context. It assumes that
 * 'x0' points to a 'fp_regs' structure from where the register
 * context will be restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, and assumes the bit is cleared.
 *
 * TODO: Revisit when VFP is used in secure world
 * ------------------------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x11
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/*
	 * No explicit ISB is required here as the ERET used to switch
	 * to secure EL1 or the non-secure world covers it
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

	/*
	 * Set SCR_EL3.EA bit to enable SErrors at EL3
	 */
	.macro enable_serror_at_el3
	mrs	x8, scr_el3
	orr	x8, x8, #SCR_EA_BIT
	msr	scr_el3, x8
	.endm

	/*
	 * Set the PSTATE bits not set when the exception was taken as
	 * described in the AArch64.TakeException() pseudocode function
	 * in ARM DDI 0487F.c page J1-7635 to a default value.
	 */
	.macro set_unset_pstate_bits
	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
#if ENABLE_FEAT_DIT
#if ENABLE_FEAT_DIT == 2
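	/*
	 * ENABLE_FEAT_DIT == 2 means DIT support is not known at build
	 * time: probe ID_AA64PFR0_EL1.DIT at runtime and skip setting
	 * PSTATE.DIT when the feature is not implemented.
	 */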
	mrs	x8, id_aa64pfr0_el1
	and	x8, x8, #(ID_AA64PFR0_DIT_MASK << ID_AA64PFR0_DIT_SHIFT)
	cbz	x8, 1f
#endif
	mov	x8, #DIT_BIT
	msr	DIT, x8
1:
#endif /* ENABLE_FEAT_DIT */
	.endm /* set_unset_pstate_bits */

/*-------------------------------------------------------------------------
 * This macro checks the ENABLE_FEAT_MPAM state, performs an ID register
 * check to see if the platform supports the MPAM extension, and restores
 * the MPAM3 register value if it is FEAT_STATE_ENABLED/FEAT_STATE_CHECKED.
 *
 * This is more involved than for other extensions because MPAM support
 * cannot be determined from the status of a particular bit in the
 * MDCR_EL3 or CPTR_EL3 register.
 * ------------------------------------------------------------------------
 */

	.macro restore_mpam3_el3
#if ENABLE_FEAT_MPAM
#if ENABLE_FEAT_MPAM == 2

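	/*
	 * Runtime MPAM detection: combine ID_AA64PFR0_EL1.MPAM and
	 * ID_AA64PFR1_EL1.MPAM_frac; if both fields are zero, MPAM is
	 * not implemented and the MPAM3_EL3 restore is skipped.
	 */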
	mrs	x8, id_aa64pfr0_el1
	lsr	x8, x8, #(ID_AA64PFR0_MPAM_SHIFT)
	and	x8, x8, #(ID_AA64PFR0_MPAM_MASK)
	mrs	x7, id_aa64pfr1_el1
	lsr	x7, x7, #(ID_AA64PFR1_MPAM_FRAC_SHIFT)
	and	x7, x7, #(ID_AA64PFR1_MPAM_FRAC_MASK)
	orr	x7, x7, x8
	cbz	x7, no_mpam
#endif
	/* -----------------------------------------------------------
	 * Restore the MPAM3_EL3 register as per the context state.
	 * Currently MPAM is only enabled for the NS world, and accesses
	 * to MPAM registers from lower ELs of the Secure and Realm
	 * worlds trap to EL3.
	 * x9 holds the address of the per_world context.
	 * -----------------------------------------------------------
	 */

	ldr	x17, [x9, #CTX_MPAM3_EL3]
	msr	S3_6_C10_C5_0, x17 /* mpam3_el3 */

no_mpam:
#endif
	.endm /* restore_mpam3_el3 */

/* ------------------------------------------------------------------
 * The following macro is used to save and restore all the general
 * purpose and ARMv8.3-PAuth (if enabled) registers.
 * It also checks if the Secure Cycle Counter (PMCCNTR_EL0) is
 * disabled in EL3/Secure (ARMv8.5-PMU), in which case PMCCNTR_EL0
 * does not need to be saved/restored during a world switch.
 *
 * Ideally we would only save and restore the callee-saved registers
 * when a world switch occurs, but that type of implementation is
 * more complex. So currently we always save and restore these
 * registers on entry to and exit from EL3.
 * clobbers: x18
 * ------------------------------------------------------------------
 */
	.macro save_gp_pmcr_pauth_regs
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
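	/* SP_EL0 is staged through x18, which is why this macro clobbers x18 */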
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	/* PMUv3 is presumed to be always present */
	mrs	x9, pmcr_el0
	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	/* Disable cycle counter when event counting is prohibited */
	orr	x9, x9, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x9
	isb
#if CTX_INCLUDE_PAUTH_REGS
	/* ----------------------------------------------------------
	 * Save the ARMv8.3-PAuth keys as they are not banked
	 * by exception level
	 * ----------------------------------------------------------
	 */
	add	x19, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x20, APIAKeyLo_EL1	/* x21:x20 = APIAKey */
	mrs	x21, APIAKeyHi_EL1
	mrs	x22, APIBKeyLo_EL1	/* x23:x22 = APIBKey */
	mrs	x23, APIBKeyHi_EL1
	mrs	x24, APDAKeyLo_EL1	/* x25:x24 = APDAKey */
	mrs	x25, APDAKeyHi_EL1
	mrs	x26, APDBKeyLo_EL1	/* x27:x26 = APDBKey */
	mrs	x27, APDBKeyHi_EL1
	mrs	x28, APGAKeyLo_EL1	/* x29:x28 = APGAKey */
	mrs	x29, APGAKeyHi_EL1

	stp	x20, x21, [x19, #CTX_PACIAKEY_LO]
	stp	x22, x23, [x19, #CTX_PACIBKEY_LO]
	stp	x24, x25, [x19, #CTX_PACDAKEY_LO]
	stp	x26, x27, [x19, #CTX_PACDBKEY_LO]
	stp	x28, x29, [x19, #CTX_PACGAKEY_LO]
#endif /* CTX_INCLUDE_PAUTH_REGS */
	.endm /* save_gp_pmcr_pauth_regs */

/* -----------------------------------------------------------------
 * This function saves the context and sets the PSTATE to a known
 * state, preparing entry to EL3.
 * It saves all the general purpose and ARMv8.3-PAuth (if enabled)
 * registers.
 * It then sets any of the PSTATE bits that are not set by hardware
 * according to the AArch64.TakeException pseudocode in the Arm
 * Architecture Reference Manual to a default value for EL3.
 * clobbers: x17
 * -----------------------------------------------------------------
 */
func prepare_el3_entry
	save_gp_pmcr_pauth_regs
	enable_serror_at_el3
	/*
	 * Set the PSTATE bits not described in the AArch64.TakeException
	 * pseudocode to their default values.
	 */
	set_unset_pstate_bits
	ret
endfunc prepare_el3_entry

/* ------------------------------------------------------------------
 * This function restores ARMv8.3-PAuth (if enabled) and all general
 * purpose registers except x30 from the CPU context.
 * x30 register must be explicitly restored by the caller.
 * ------------------------------------------------------------------
 */
func restore_gp_pmcr_pauth_regs
#if CTX_INCLUDE_PAUTH_REGS
	/* Restore the ARMv8.3 PAuth keys */
	add	x10, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x0, x1, [x10, #CTX_PACIAKEY_LO]	/* x1:x0 = APIAKey */
	ldp	x2, x3, [x10, #CTX_PACIBKEY_LO]	/* x3:x2 = APIBKey */
	ldp	x4, x5, [x10, #CTX_PACDAKEY_LO]	/* x5:x4 = APDAKey */
	ldp	x6, x7, [x10, #CTX_PACDBKEY_LO]	/* x7:x6 = APDBKey */
	ldp	x8, x9, [x10, #CTX_PACGAKEY_LO]	/* x9:x8 = APGAKey */

	msr	APIAKeyLo_EL1, x0
	msr	APIAKeyHi_EL1, x1
	msr	APIBKeyLo_EL1, x2
	msr	APIBKeyHi_EL1, x3
	msr	APDAKeyLo_EL1, x4
	msr	APDAKeyHi_EL1, x5
	msr	APDBKeyLo_EL1, x6
	msr	APDBKeyHi_EL1, x7
	msr	APGAKeyLo_EL1, x8
	msr	APGAKeyHi_EL1, x9
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/* PMUv3 is presumed to be always present */
	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	msr	pmcr_el0, x0
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
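	/* Restore SP_EL0 via x28 before x28/x29 themselves are reloaded */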
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_gp_pmcr_pauth_regs

#if ERRATA_SPECULATIVE_AT
/* --------------------------------------------------------------------
 * In case of ERRATA_SPECULATIVE_AT, save SCTLR_EL1 and TCR_EL1
 * registers and update EL1 registers to disable stage1 and stage2
 * page table walk.
 * --------------------------------------------------------------------
 */
func save_and_update_ptw_el1_sys_regs
	/* ----------------------------------------------------------
	 * Save only sctlr_el1 and tcr_el1 registers
	 * ----------------------------------------------------------
	 */
	mrs	x29, sctlr_el1
	str	x29, [sp, #(CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_SCTLR_EL1)]
	mrs	x29, tcr_el1
	str	x29, [sp, #(CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_TCR_EL1)]

	/* ------------------------------------------------------------
	 * The steps below must be performed in this order to disable
	 * the page table walk for lower ELs (EL1 and EL0). The first
	 * step disables the stage 1 page table walk, and the second
	 * step forces the page table walker to honour the TCR_EL1.EPDx
	 * bits when performing address translation. The ISB ensures
	 * that the CPU performs these two steps in order.
	 *
	 * 1. Update TCR_EL1.EPDx bits to disable page table walk by
	 *    stage1.
	 * 2. Enable MMU bit to avoid identity mapping via stage2
	 *    and force TCR_EL1.EPDx to be used by the page table
	 *    walker.
	 * ------------------------------------------------------------
	 */
	orr	x29, x29, #(TCR_EPD0_BIT)
	orr	x29, x29, #(TCR_EPD1_BIT)
	msr	tcr_el1, x29
	isb
	mrs	x29, sctlr_el1
	orr	x29, x29, #SCTLR_M_BIT
	msr	sctlr_el1, x29
	isb
	ret
endfunc save_and_update_ptw_el1_sys_regs

#endif /* ERRATA_SPECULATIVE_AT */

/* -----------------------------------------------------------------
 * The macro below returns the address of the per_world context for
 * the security state, which is retrieved through the
 * "get_security_state" macro. The per_world context address is
 * returned in the register argument.
 * Clobbers: x9, x10
 * ------------------------------------------------------------------
 */

.macro get_per_world_context _reg:req
	ldr	x10, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	get_security_state x9, x10
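	/*
	 * x9 now holds the security state index: scale it by the size
	 * of one per-world context entry and add the base address of
	 * the per_world_context array to locate this world's entry.
	 */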
	mov_imm	x10, (CTX_PERWORLD_EL3STATE_END - CTX_CPTR_EL3)
	mul	x9, x9, x10
	adrp	x10, per_world_context
	add	x10, x10, :lo12:per_world_context
	add	x9, x9, x10
	mov	\_reg, x9
.endm

/* ------------------------------------------------------------------
 * This routine assumes that SP_EL3 is pointing to a valid context
 * structure from which the GP regs and other special registers can
 * be retrieved.
 * ------------------------------------------------------------------
 */
func el3_exit
#if ENABLE_ASSERTIONS
	/* el3_exit assumes SP_EL0 on entry */
	mrs	x17, spsel
	cmp	x17, #MODE_SP_EL0
	ASM_ASSERT(eq)
#endif /* ENABLE_ASSERTIONS */

	/* ----------------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack which
	 * will be used for handling the next SMC.
	 * Then switch to SP_EL3.
	 * ----------------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #MODE_SP_ELX
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* ----------------------------------------------------------
	 * Restore CPTR_EL3.
	 * ZCR is only restored if SVE is supported and enabled.
	 * Synchronization is required before zcr_el3 is addressed.
	 * ----------------------------------------------------------
	 */

	/* The address of the per_world context is stored in x9 */
	get_per_world_context x9

	ldp	x19, x20, [x9, #CTX_CPTR_EL3]
	msr	cptr_el3, x19

#if IMAGE_BL31
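	/* Skip the ZCR_EL3 restore when CPTR_EL3.EZ shows SVE is not enabled */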
	ands	x19, x19, #CPTR_EZ_BIT
	beq	sve_not_enabled

	isb
	msr	S3_6_C1_C2_0, x20 /* zcr_el3 */
sve_not_enabled:

	restore_mpam3_el3

#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* ----------------------------------------------------------
	 * Restore mitigation state as it was on entry to EL3
	 * ----------------------------------------------------------
	 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cbz	x17, 1f
	blr	x17
1:
#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */

#if IMAGE_BL31
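	/* Ensure pending asynchronous errors are synchronized before exiting EL3 */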
	synchronize_errors
#endif /* IMAGE_BL31 */

	/* --------------------------------------------------------------
	 * Restore MDCR_EL3, SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * --------------------------------------------------------------
	 */
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldr	x19, [sp, #CTX_EL3STATE_OFFSET + CTX_MDCR_EL3]
	msr	spsr_el3, x16
	msr	elr_el3, x17
	msr	scr_el3, x18
	msr	mdcr_el3, x19

	restore_ptw_el1_sys_regs

	/* ----------------------------------------------------------
	 * Restore general purpose (including x30), PMCR_EL0 and
	 * ARMv8.3-PAuth registers.
	 * Exit EL3 via ERET to a lower exception level.
	 * ----------------------------------------------------------
	 */
	bl	restore_gp_pmcr_pauth_regs
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#ifdef IMAGE_BL31
	/* Clear the EL3 flag as we are exiting el3 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
#endif /* IMAGE_BL31 */

	exception_return

endfunc el3_exit