/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_features.h>
#include <attestation.h>
#include <buffer.h>
#include <cpuid.h>
#include <exit.h>
#include <pmu.h>
#include <rec.h>
#include <run.h>
#include <simd.h>
#include <smc-rmi.h>
#include <timers.h>

static struct ns_state g_ns_data[MAX_CPUS];
static struct pmu_state g_pmu_data[MAX_CPUS];

/*
 * Initialize the aux data and any buffer pointers to the aux granule memory
 * for use by the REC when it is entered.
 */
static void init_aux_data(struct rec_aux_data *aux_data,
			  void *rec_aux,
			  unsigned int num_rec_aux)
{
	/* Ensure we have enough aux granules for use by REC */
	assert(num_rec_aux >= REC_NUM_PAGES);

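	/*
	 * Layout of the mapped REC aux memory, as set up below:
	 *   [0, REC_HEAP_SIZE)                    attestation heap
	 *   [REC_HEAP_SIZE, +REC_PMU_SIZE)        PMU state
	 *   [REC_HEAP_SIZE + REC_PMU_SIZE, ...)   SIMD state
	 */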
	aux_data->attest_heap_buf = (uint8_t *)rec_aux;

	aux_data->pmu = (struct pmu_state *)((uint8_t *)rec_aux +
					     REC_HEAP_SIZE);

	aux_data->rec_simd.simd = (struct simd_state *)((uint8_t *)rec_aux +
							REC_HEAP_SIZE +
							REC_PMU_SIZE);
}

/*
 * The parent REC granule's lock is expected to be acquired
 * before functions map_rec_aux() and unmap_rec_aux() are called.
 */
static void *map_rec_aux(struct granule *rec_aux_pages[], unsigned long num_aux)
{
	void *rec_aux = NULL;

	for (unsigned long i = 0UL; i < num_aux; i++) {
		void *aux = granule_map(rec_aux_pages[i], SLOT_REC_AUX0 + i);

		if (i == 0UL) {
			rec_aux = aux;
		}
	}
	return rec_aux;
}

static void unmap_rec_aux(void *rec_aux, unsigned long num_aux)
{
	unsigned char *rec_aux_vaddr = (unsigned char *)rec_aux;

	for (unsigned long i = 0UL; i < num_aux; i++) {
		buffer_unmap(rec_aux_vaddr + i * GRANULE_SIZE);
	}
}

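/* Save the EL0/EL1 system register context, including the timer registers. */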
static void save_sysreg_state(struct sysreg_state *sysregs)
{
	sysregs->sp_el0 = read_sp_el0();
	sysregs->sp_el1 = read_sp_el1();
	sysregs->elr_el1 = read_elr_el12();
	sysregs->spsr_el1 = read_spsr_el12();
	sysregs->pmcr_el0 = read_pmcr_el0();
	sysregs->tpidrro_el0 = read_tpidrro_el0();
	sysregs->tpidr_el0 = read_tpidr_el0();
	sysregs->csselr_el1 = read_csselr_el1();
	sysregs->sctlr_el1 = read_sctlr_el12();
	sysregs->actlr_el1 = read_actlr_el1();
	sysregs->cpacr_el1 = read_cpacr_el12();
	sysregs->ttbr0_el1 = read_ttbr0_el12();
	sysregs->ttbr1_el1 = read_ttbr1_el12();
	sysregs->tcr_el1 = read_tcr_el12();
	sysregs->esr_el1 = read_esr_el12();
	sysregs->afsr0_el1 = read_afsr0_el12();
	sysregs->afsr1_el1 = read_afsr1_el12();
	sysregs->far_el1 = read_far_el12();
	sysregs->mair_el1 = read_mair_el12();
	sysregs->vbar_el1 = read_vbar_el12();

	sysregs->contextidr_el1 = read_contextidr_el12();
	sysregs->tpidr_el1 = read_tpidr_el1();
	sysregs->amair_el1 = read_amair_el12();
	sysregs->cntkctl_el1 = read_cntkctl_el12();
	sysregs->par_el1 = read_par_el1();
	sysregs->mdscr_el1 = read_mdscr_el1();
	sysregs->mdccint_el1 = read_mdccint_el1();
	sysregs->disr_el1 = read_disr_el1();
	MPAM(sysregs->mpam0_el1 = read_mpam0_el1();)

	/* Timer registers */
	sysregs->cntpoff_el2 = read_cntpoff_el2();
	sysregs->cntvoff_el2 = read_cntvoff_el2();
	sysregs->cntp_ctl_el0 = read_cntp_ctl_el02();
	sysregs->cntp_cval_el0 = read_cntp_cval_el02();
	sysregs->cntv_ctl_el0 = read_cntv_ctl_el02();
	sysregs->cntv_cval_el0 = read_cntv_cval_el02();
}

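/*
 * Save the Realm context on exit: EL0/EL1 system registers, the return PC
 * and PSTATE, the GIC state and, when enabled for the Realm, the PMU state.
 */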
static void save_realm_state(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	save_sysreg_state(&rec->sysregs);

	rec->pc = read_elr_el2();
	rec->pstate = read_spsr_el2();

	gic_save_state(&rec->sysregs.gicstate);

	if (rec->realm_info.pmu_enabled) {
		/* Expose PMU Realm state to NS */
		pmu_update_rec_exit(rec_exit);

		/* Save PMU context */
		pmu_save_state(rec->aux_data.pmu,
			       rec->realm_info.pmu_num_cnts);
	}
}

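/* Restore the EL0/EL1 system register context from 'sysregs'. */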
static void restore_sysreg_state(struct sysreg_state *sysregs)
{
	write_sp_el0(sysregs->sp_el0);
	write_sp_el1(sysregs->sp_el1);
	write_elr_el12(sysregs->elr_el1);
	write_spsr_el12(sysregs->spsr_el1);
	write_pmcr_el0(sysregs->pmcr_el0);
	write_tpidrro_el0(sysregs->tpidrro_el0);
	write_tpidr_el0(sysregs->tpidr_el0);
	write_csselr_el1(sysregs->csselr_el1);
	write_sctlr_el12(sysregs->sctlr_el1);
	write_actlr_el1(sysregs->actlr_el1);
	write_cpacr_el12(sysregs->cpacr_el1);
	write_ttbr0_el12(sysregs->ttbr0_el1);
	write_ttbr1_el12(sysregs->ttbr1_el1);
	write_tcr_el12(sysregs->tcr_el1);
	write_esr_el12(sysregs->esr_el1);
	write_afsr0_el12(sysregs->afsr0_el1);
	write_afsr1_el12(sysregs->afsr1_el1);
	write_far_el12(sysregs->far_el1);
	write_mair_el12(sysregs->mair_el1);
	write_vbar_el12(sysregs->vbar_el1);

	write_contextidr_el12(sysregs->contextidr_el1);
	write_tpidr_el1(sysregs->tpidr_el1);
	write_amair_el12(sysregs->amair_el1);
	write_cntkctl_el12(sysregs->cntkctl_el1);
	write_par_el1(sysregs->par_el1);
	write_mdscr_el1(sysregs->mdscr_el1);
	write_mdccint_el1(sysregs->mdccint_el1);
	write_disr_el1(sysregs->disr_el1);
	MPAM(write_mpam0_el1(sysregs->mpam0_el1);)
	write_vmpidr_el2(sysregs->vmpidr_el2);

	/* Timer registers */
	write_cntpoff_el2(sysregs->cntpoff_el2);
	write_cntvoff_el2(sysregs->cntvoff_el2);

	/*
	 * Restore CNTx_CVAL registers before CNTx_CTL to avoid
	 * raising the interrupt signal briefly before lowering
	 * it again due to some expired CVAL left in the timer
	 * register.
	 */
	write_cntp_cval_el02(sysregs->cntp_cval_el0);
	write_cntp_ctl_el02(sysregs->cntp_ctl_el0);
	write_cntv_cval_el02(sysregs->cntv_cval_el0);
	write_cntv_ctl_el02(sysregs->cntv_ctl_el0);
}

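/* Program the stage 2 translation registers (VTCR_EL2, VTTBR_EL2) for the Realm. */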
static void configure_realm_stage2(struct rec *rec)
{
	write_vtcr_el2(rec->common_sysregs.vtcr_el2);
	write_vttbr_el2(rec->common_sysregs.vttbr_el2);
}

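/*
 * Restore the Realm context saved by save_realm_state() ahead of re-entering
 * the Realm on this CPU.
 */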
static void restore_realm_state(struct rec *rec)
{
	/*
	 * Restore this early to give the timer mask time to propagate to
	 * the GIC. Issue an ISB to ensure the register write is actually
	 * performed before doing the remaining work.
	 */
	write_cnthctl_el2(rec->sysregs.cnthctl_el2);
	isb();

	restore_sysreg_state(&rec->sysregs);

	write_elr_el2(rec->pc);
	write_spsr_el2(rec->pstate);
	write_hcr_el2(rec->sysregs.hcr_el2);

	/* Control trapping of accesses to PMU registers */
	write_mdcr_el2(rec->common_sysregs.mdcr_el2);

	gic_restore_state(&rec->sysregs.gicstate);

	configure_realm_stage2(rec);

	if (rec->realm_info.pmu_enabled) {
		/* Restore PMU context */
		pmu_restore_state(rec->aux_data.pmu,
				  rec->realm_info.pmu_num_cnts);
	}
}

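/*
 * Save the NS world context before entering the Realm: EL0/EL1 system
 * registers, CNTHCTL_EL2, ICC_SRE_EL2 and, when enabled for the Realm,
 * the PMU state.
 */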
static void save_ns_state(struct rec *rec)
{
	struct ns_state *ns_state = rec->ns;

	save_sysreg_state(&ns_state->sysregs);

	/*
	 * CNTHCTL_EL2 is saved/restored separately from the main system
	 * registers, because the Realm configuration is written on every
	 * entry to the Realm, see `check_pending_timers`.
	 */
	ns_state->sysregs.cnthctl_el2 = read_cnthctl_el2();

	ns_state->icc_sre_el2 = read_icc_sre_el2();

	if (rec->realm_info.pmu_enabled) {
		/* Save PMU context */
		pmu_save_state(ns_state->pmu, rec->realm_info.pmu_num_cnts);
	}
}

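/* Restore the NS world context saved by save_ns_state(). */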
static void restore_ns_state(struct rec *rec)
{
	struct ns_state *ns_state = rec->ns;

	restore_sysreg_state(&ns_state->sysregs);

	/*
	 * CNTHCTL_EL2 is saved/restored separately from the main system
	 * registers, because the Realm configuration is written on every
	 * entry to the Realm, see `check_pending_timers`.
	 */
	write_cnthctl_el2(ns_state->sysregs.cnthctl_el2);

	write_icc_sre_el2(ns_state->icc_sre_el2);

	if (rec->realm_info.pmu_enabled) {
		/* Restore PMU state */
		pmu_restore_state(ns_state->pmu,
				  rec->realm_info.pmu_num_cnts);
	}
}

static void activate_events(struct rec *rec)
{
	/*
	 * The only event that may be activated for the Realm is an SError.
	 */
	if (rec->serror_info.inject) {
		write_vsesr_el2(rec->serror_info.vsesr_el2);
		write_hcr_el2(rec->sysregs.hcr_el2 | HCR_VSE);
		rec->serror_info.inject = false;
	}
}

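/*
 * Record a virtual SError to be injected into the Realm; activate_events()
 * programs VSESR_EL2 and sets HCR_EL2.VSE on the next Realm entry.
 */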
void inject_serror(struct rec *rec, unsigned long vsesr)
{
	rec->serror_info.vsesr_el2 = vsesr;
	rec->serror_info.inject = true;
}

/* Initialize the REC SIMD state once, on the first REC entry */
static void rec_simd_state_init(struct rec *rec)
{
	struct rec_simd_state *rec_simd;
	simd_t stype;

	rec_simd = &rec->aux_data.rec_simd;
	assert(rec_simd->simd != NULL);

	if (rec_simd->init_done == true) {
		return;
	}

	stype = rec_simd_type(rec);
	/*
	 * As part of lazy save/restore, the first state will be restored from
	 * the REC's simd_state, so the initial state is considered saved.
	 * Call simd_state_init() to set the SIMD type; sve_vq will be set if
	 * the REC 'stype' is SIMD_SVE.
	 */
	simd_state_init(stype, rec_simd->simd, rec->realm_info.sve_vq);
	rec_simd->simd_allowed = false;
	rec_simd->init_done = true;
}

/* Save the REC SIMD state to memory and disable SIMD access for the REC */
static void rec_simd_save_disable(struct rec *rec)
{
	struct rec_simd_state *rec_simd;
	simd_t stype;

	rec_simd = &rec->aux_data.rec_simd;

	assert(rec_simd->simd != NULL);
	assert(rec_simd->simd_allowed == true);

	stype = rec_simd_type(rec);

	/*
	 * As the REC has used SIMD, there is no need to disable traps here:
	 * they must already be disabled as part of the last restore.
	 */
	rec_simd->simd_allowed = false;
	simd_save_state(stype, rec_simd->simd);
	simd_disable();
}

/* Restore the REC SIMD state from memory and enable SIMD access for the REC */
void rec_simd_enable_restore(struct rec *rec)
{
	struct rec_simd_state *rec_simd;
	simd_t stype;

	assert(rec != NULL);
	rec_simd = &rec->aux_data.rec_simd;

	assert(rec_simd->simd != NULL);
	assert(rec_simd->simd_allowed == false);

	stype = rec_simd_type(rec);
	simd_enable(stype);
	simd_restore_state(stype, rec_simd->simd);
	rec_simd->simd_allowed = true;
	/* Return with traps disabled to allow the REC to use FPU and/or SVE */
}

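/*
 * Enter the Realm described by 'rec' on the current CPU and run it until an
 * exit condition that must be reported to the NS host occurs; the exit
 * details are returned in 'rec_exit'.
 */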
void rec_run_loop(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	struct ns_state *ns_state;
	int realm_exception_code;
	void *rec_aux;
	unsigned int cpuid = my_cpuid();

	assert(cpuid < MAX_CPUS);
	assert(rec->ns == NULL);

	ns_state = &g_ns_data[cpuid];

	/* Ensure PMU context is cleared */
	assert(ns_state->pmu == NULL);

	rec->ns = ns_state;

	/* Map auxiliary granules */
	rec_aux = map_rec_aux(rec->g_aux, rec->num_rec_aux);

	init_aux_data(&(rec->aux_data), rec_aux, rec->num_rec_aux);

	/*
	 * The attestation heap on the REC aux pages is mapped now. It is time
	 * to associate it with the current CPU.
	 * This heap will be used for attestation RSI calls when the
	 * REC is running.
	 */
	attestation_heap_ctx_assign_pe(&rec->alloc_info.ctx);

	/*
	 * Initialise the heap for attestation if necessary.
	 */
	if (!rec->alloc_info.ctx_initialised) {
		(void)attestation_heap_ctx_init(rec->aux_data.attest_heap_buf,
						REC_HEAP_SIZE);
		rec->alloc_info.ctx_initialised = true;
	}

	rec_simd_state_init(rec);

	ns_state->pmu = &g_pmu_data[cpuid];

	save_ns_state(rec);
	restore_realm_state(rec);

	/* The REC must enter the run loop with SIMD access disabled */
	assert(rec_is_simd_allowed(rec) == false);

	do {
		/*
		 * We must check the status of the arch timers in every
		 * iteration of the loop to ensure we update the timer
		 * mask on each entry to the realm and that we report any
		 * change in output level to the NS caller.
		 */
		if (check_pending_timers(rec)) {
			rec_exit->exit_reason = RMI_EXIT_IRQ;
			break;
		}

		activate_events(rec);
		realm_exception_code = run_realm(&rec->regs[0]);
	} while (handle_realm_exit(rec, rec_exit, realm_exception_code));

	/*
	 * Check if FPU/SIMD was used, and if it was, save the realm state,
	 * restore the NS state, and reenable traps in CPTR_EL2.
	 */
	if (rec_is_simd_allowed(rec)) {
		/* Save REC SIMD state to memory and disable SIMD for REC */
		rec_simd_save_disable(rec);

		/* Restore NS state based on system support for SVE or FPU */
		simd_restore_ns_state();
	}

	report_timer_state_to_ns(rec_exit);

	save_realm_state(rec, rec_exit);
	restore_ns_state(rec);

	/*
	 * Clear the PMU context pointer while exiting.
	 */
	ns_state->pmu = NULL;

	/*
	 * Clear the NS pointer; it refers to per-CPU data that is only valid
	 * while this function runs on this CPU.
	 */
	rec->ns = NULL;

	/* Undo the heap association */
	attestation_heap_ctx_unassign_pe();
	/* Unmap auxiliary granules */
	unmap_rec_aux(rec_aux, rec->num_rec_aux);
}