/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_features.h>
#include <assert.h>
#include <attestation.h>
#include <buffer.h>
#include <cpuid.h>
#include <exit.h>
#include <fpu_helpers.h>
#include <pmu.h>
#include <rec.h>
#include <run.h>
#include <smc-rmi.h>
#include <sve.h>
#include <timers.h>

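/*
 * Per-CPU buffers used to hold the NS state (system registers, SVE or FPU
 * context and PMU context) while a REC is running on this CPU.
 */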
static struct ns_state g_ns_data[MAX_CPUS];
static uint8_t g_sve_data[MAX_CPUS][sizeof(struct sve_state)]
                __aligned(sizeof(__uint128_t));
static struct pmu_state g_pmu_data[MAX_CPUS];

/*
 * Initialize the aux data and set the buffer pointers into the aux granule
 * memory for use by the REC when it is entered.
 */
static void init_aux_data(struct rec_aux_data *aux_data,
                          void *rec_aux,
                          unsigned int num_rec_aux)
{
        /* Ensure we have enough aux granules for use by the REC */
        assert(num_rec_aux >= REC_NUM_PAGES);

        aux_data->attest_heap_buf = (uint8_t *)rec_aux;
        aux_data->pmu = (struct pmu_state *)((uint8_t *)rec_aux +
                                             REC_HEAP_SIZE);
}

/*
 * The lock of the parent REC granule is expected to be acquired before
 * map_rec_aux() and unmap_rec_aux() are called.
 */
static void *map_rec_aux(struct granule *rec_aux_pages[], unsigned long num_aux)
{
        void *rec_aux = NULL;

        for (unsigned long i = 0UL; i < num_aux; i++) {
                void *aux = granule_map(rec_aux_pages[i], SLOT_REC_AUX0 + i);

                if (i == 0UL) {
                        rec_aux = aux;
                }
        }
        return rec_aux;
}

static void unmap_rec_aux(void *rec_aux, unsigned long num_aux)
{
        unsigned char *rec_aux_vaddr = (unsigned char *)rec_aux;

        for (unsigned long i = 0UL; i < num_aux; i++) {
                buffer_unmap(rec_aux_vaddr + i * GRANULE_SIZE);
        }
}

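/*
 * Save the system register context of the world that is being switched out
 * (Realm or NS) into the given sysreg_state structure.
 */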
static void save_sysreg_state(struct sysreg_state *sysregs)
{
        sysregs->sp_el0 = read_sp_el0();
        sysregs->sp_el1 = read_sp_el1();
        sysregs->elr_el1 = read_elr_el12();
        sysregs->spsr_el1 = read_spsr_el12();
        sysregs->pmcr_el0 = read_pmcr_el0();
        sysregs->tpidrro_el0 = read_tpidrro_el0();
        sysregs->tpidr_el0 = read_tpidr_el0();
        sysregs->csselr_el1 = read_csselr_el1();
        sysregs->sctlr_el1 = read_sctlr_el12();
        sysregs->actlr_el1 = read_actlr_el1();
        sysregs->cpacr_el1 = read_cpacr_el12();
        sysregs->ttbr0_el1 = read_ttbr0_el12();
        sysregs->ttbr1_el1 = read_ttbr1_el12();
        sysregs->tcr_el1 = read_tcr_el12();
        sysregs->esr_el1 = read_esr_el12();
        sysregs->afsr0_el1 = read_afsr0_el12();
        sysregs->afsr1_el1 = read_afsr1_el12();
        sysregs->far_el1 = read_far_el12();
        sysregs->mair_el1 = read_mair_el12();
        sysregs->vbar_el1 = read_vbar_el12();

        sysregs->contextidr_el1 = read_contextidr_el12();
        sysregs->tpidr_el1 = read_tpidr_el1();
        sysregs->amair_el1 = read_amair_el12();
        sysregs->cntkctl_el1 = read_cntkctl_el12();
        sysregs->par_el1 = read_par_el1();
        sysregs->mdscr_el1 = read_mdscr_el1();
        sysregs->mdccint_el1 = read_mdccint_el1();
        sysregs->disr_el1 = read_disr_el1();
        MPAM(sysregs->mpam0_el1 = read_mpam0_el1();)

        /* Timer registers */
        sysregs->cntpoff_el2 = read_cntpoff_el2();
        sysregs->cntvoff_el2 = read_cntvoff_el2();
        sysregs->cntp_ctl_el0 = read_cntp_ctl_el02();
        sysregs->cntp_cval_el0 = read_cntp_cval_el02();
        sysregs->cntv_ctl_el0 = read_cntv_ctl_el02();
        sysregs->cntv_cval_el0 = read_cntv_cval_el02();
}

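/*
 * Save the Realm context on exit from the Realm: system registers, PC and
 * PSTATE, GIC state and, if the PMU is enabled for this Realm, the PMU
 * context (also exposing the Realm PMU state to NS via the REC exit record).
 */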
static void save_realm_state(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        save_sysreg_state(&rec->sysregs);

        rec->pc = read_elr_el2();
        rec->pstate = read_spsr_el2();

        gic_save_state(&rec->sysregs.gicstate);

        if (rec->realm_info.pmu_enabled) {
                /* Expose PMU Realm state to NS */
                pmu_update_rec_exit(rec_exit);

                /* Save PMU context */
                pmu_save_state(rec->aux_data.pmu,
                               rec->realm_info.pmu_num_cnts);
        }
}

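/*
 * Restore the system register context of the world that is about to run
 * (Realm or NS) from the given sysreg_state structure.
 */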
static void restore_sysreg_state(struct sysreg_state *sysregs)
{
        write_sp_el0(sysregs->sp_el0);
        write_sp_el1(sysregs->sp_el1);
        write_elr_el12(sysregs->elr_el1);
        write_spsr_el12(sysregs->spsr_el1);
        write_pmcr_el0(sysregs->pmcr_el0);
        write_tpidrro_el0(sysregs->tpidrro_el0);
        write_tpidr_el0(sysregs->tpidr_el0);
        write_csselr_el1(sysregs->csselr_el1);
        write_sctlr_el12(sysregs->sctlr_el1);
        write_actlr_el1(sysregs->actlr_el1);
        write_cpacr_el12(sysregs->cpacr_el1);
        write_ttbr0_el12(sysregs->ttbr0_el1);
        write_ttbr1_el12(sysregs->ttbr1_el1);
        write_tcr_el12(sysregs->tcr_el1);
        write_esr_el12(sysregs->esr_el1);
        write_afsr0_el12(sysregs->afsr0_el1);
        write_afsr1_el12(sysregs->afsr1_el1);
        write_far_el12(sysregs->far_el1);
        write_mair_el12(sysregs->mair_el1);
        write_vbar_el12(sysregs->vbar_el1);

        write_contextidr_el12(sysregs->contextidr_el1);
        write_tpidr_el1(sysregs->tpidr_el1);
        write_amair_el12(sysregs->amair_el1);
        write_cntkctl_el12(sysregs->cntkctl_el1);
        write_par_el1(sysregs->par_el1);
        write_mdscr_el1(sysregs->mdscr_el1);
        write_mdccint_el1(sysregs->mdccint_el1);
        write_disr_el1(sysregs->disr_el1);
        MPAM(write_mpam0_el1(sysregs->mpam0_el1);)
        write_vmpidr_el2(sysregs->vmpidr_el2);

        /* Timer registers */
        write_cntpoff_el2(sysregs->cntpoff_el2);
        write_cntvoff_el2(sysregs->cntvoff_el2);

        /*
         * Restore CNTx_CVAL registers before CNTx_CTL to avoid
         * raising the interrupt signal briefly before lowering
         * it again due to some expired CVAL left in the timer
         * register.
         */
        write_cntp_cval_el02(sysregs->cntp_cval_el0);
        write_cntp_ctl_el02(sysregs->cntp_ctl_el0);
        write_cntv_cval_el02(sysregs->cntv_cval_el0);
        write_cntv_ctl_el02(sysregs->cntv_ctl_el0);
}

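/*
 * Program the stage 2 translation controls (VTCR_EL2, VTTBR_EL2) for the
 * Realm.
 */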
static void configure_realm_stage2(struct rec *rec)
{
        write_vtcr_el2(rec->common_sysregs.vtcr_el2);
        write_vttbr_el2(rec->common_sysregs.vttbr_el2);
}

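/*
 * Restore the Realm context before entering the Realm: system registers,
 * PC and PSTATE, GIC state, stage 2 configuration and, if the PMU is enabled
 * for this Realm, the PMU context.
 */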
static void restore_realm_state(struct rec *rec)
{
        /*
         * Restore this early to give time to the timer mask to propagate to
         * the GIC. Issue an ISB to ensure the register write is actually
         * performed before doing the remaining work.
         */
        write_cnthctl_el2(rec->sysregs.cnthctl_el2);
        isb();

        restore_sysreg_state(&rec->sysregs);

        write_elr_el2(rec->pc);
        write_spsr_el2(rec->pstate);
        write_hcr_el2(rec->sysregs.hcr_el2);

        /* Control trapping of accesses to PMU registers */
        write_mdcr_el2(rec->common_sysregs.mdcr_el2);

        gic_restore_state(&rec->sysregs.gicstate);

        configure_realm_stage2(rec);

        if (rec->realm_info.pmu_enabled) {
                /* Restore PMU context */
                pmu_restore_state(rec->aux_data.pmu,
                                  rec->realm_info.pmu_num_cnts);
        }
}

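/*
 * Save the NS context before entering the Realm: system registers,
 * CNTHCTL_EL2, ICC_SRE_EL2 and, if the PMU is enabled for this Realm, the NS
 * PMU context.
 */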
static void save_ns_state(struct rec *rec)
{
        struct ns_state *ns_state = rec->ns;

        save_sysreg_state(&ns_state->sysregs);

        /*
         * CNTHCTL_EL2 is saved/restored separately from the main system
         * registers, because the Realm configuration is written on every
         * entry to the Realm, see `check_pending_timers`.
         */
        ns_state->sysregs.cnthctl_el2 = read_cnthctl_el2();

        ns_state->icc_sre_el2 = read_icc_sre_el2();

        if (rec->realm_info.pmu_enabled) {
                /* Save PMU context */
                pmu_save_state(ns_state->pmu, rec->realm_info.pmu_num_cnts);
        }
}

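/* Restore the NS context saved by save_ns_state() before returning to NS. */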
static void restore_ns_state(struct rec *rec)
{
        struct ns_state *ns_state = rec->ns;

        restore_sysreg_state(&ns_state->sysregs);

        /*
         * CNTHCTL_EL2 is saved/restored separately from the main system
         * registers, because the Realm configuration is written on every
         * entry to the Realm, see `check_pending_timers`.
         */
        write_cnthctl_el2(ns_state->sysregs.cnthctl_el2);

        write_icc_sre_el2(ns_state->icc_sre_el2);

        if (rec->realm_info.pmu_enabled) {
                /* Restore PMU state */
                pmu_restore_state(ns_state->pmu,
                                  rec->realm_info.pmu_num_cnts);
        }
}

static void activate_events(struct rec *rec)
{
        /*
         * The only event that may be activated at the Realm is an SError.
         */
        if (rec->serror_info.inject) {
                write_vsesr_el2(rec->serror_info.vsesr_el2);
                write_hcr_el2(rec->sysregs.hcr_el2 | HCR_VSE);
                rec->serror_info.inject = false;
        }
}

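/*
 * Queue a virtual SError with the given VSESR_EL2 syndrome for injection into
 * the Realm on the next entry (see activate_events()).
 */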
void inject_serror(struct rec *rec, unsigned long vsesr)
{
        rec->serror_info.vsesr_el2 = vsesr;
        rec->serror_info.inject = true;
}

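/*
 * Run the REC on the current CPU: save the NS context, restore the Realm
 * context and enter the Realm repeatedly until an exit needs to be reported
 * to the NS host, then restore the NS context and populate the REC exit
 * record.
 */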
void rec_run_loop(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        struct ns_state *ns_state;
        int realm_exception_code;
        void *rec_aux;
        unsigned int cpuid = my_cpuid();

        assert(cpuid < MAX_CPUS);
        assert(rec->ns == NULL);
        assert(rec->fpu_ctx.used == false);

        ns_state = &g_ns_data[cpuid];

        /* Ensure SVE/FPU and PMU context is cleared */
        assert(ns_state->sve == NULL);
        assert(ns_state->fpu == NULL);
        assert(ns_state->pmu == NULL);

        rec->ns = ns_state;

        /* Map auxiliary granules */
        rec_aux = map_rec_aux(rec->g_aux, rec->num_rec_aux);

        init_aux_data(&(rec->aux_data), rec_aux, rec->num_rec_aux);

        /*
         * The attestation heap on the REC aux pages is mapped now. It is
         * time to associate it with the current CPU.
         * This heap will be used for attestation RSI calls when the
         * REC is running.
         */
        attestation_heap_ctx_assign_pe(&rec->alloc_info.ctx);

        /*
         * Initialise the heap for attestation if necessary.
         */
        if (!rec->alloc_info.ctx_initialised) {
                (void)attestation_heap_ctx_init(rec->aux_data.attest_heap_buf,
                                                REC_HEAP_SIZE);
                rec->alloc_info.ctx_initialised = true;
        }

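        /*
         * When SVE is not present, the per-CPU SVE buffer is reused to hold
         * the NS FPU state instead.
         */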
        if (is_feat_sve_present()) {
                ns_state->sve = (struct sve_state *)&g_sve_data[cpuid];
        } else {
                ns_state->fpu = (struct fpu_state *)&g_sve_data[cpuid];
        }

        ns_state->pmu = &g_pmu_data[cpuid];

        save_ns_state(rec);
        restore_realm_state(rec);

        do {
                /*
                 * We must check the status of the arch timers in every
                 * iteration of the loop to ensure we update the timer
                 * mask on each entry to the realm and that we report any
                 * change in output level to the NS caller.
                 */
                if (check_pending_timers(rec)) {
                        rec_exit->exit_reason = RMI_EXIT_IRQ;
                        break;
                }

                activate_events(rec);
                realm_exception_code = run_realm(&rec->regs[0]);
        } while (handle_realm_exit(rec, rec_exit, realm_exception_code));

        /*
         * If the Realm used the FPU/SIMD, disable the CPTR_EL2 traps so the
         * registers can be accessed, save the Realm FPU state, restore the
         * NS FPU/SVE state and then re-enable the traps.
         */
        if (rec->fpu_ctx.used) {
                unsigned long cptr;

                /* Disable the SVE trap to allow access to the registers */
                cptr = read_cptr_el2();
                cptr &= ~MASK(CPTR_EL2_ZEN);
                cptr |= INPLACE(CPTR_EL2_ZEN, CPTR_EL2_ZEN_NO_TRAP_11);
                write_cptr_el2(cptr);
                isb();

                fpu_save_state(&rec->fpu_ctx.fpu);
                if (ns_state->sve != NULL) {
                        restore_sve_state(ns_state->sve);
                } else {
                        assert(ns_state->fpu != NULL);
                        fpu_restore_state(ns_state->fpu);
                }

                /* Re-enable the FPU/SVE traps */
                cptr = read_cptr_el2();
                cptr &= ~(MASK(CPTR_EL2_FPEN) | MASK(CPTR_EL2_ZEN));
                cptr |= INPLACE(CPTR_EL2_FPEN, CPTR_EL2_FPEN_TRAP_ALL_00) |
                        INPLACE(CPTR_EL2_ZEN, CPTR_EL2_ZEN_TRAP_ALL_00);
                write_cptr_el2(cptr);
                isb();
                rec->fpu_ctx.used = false;
        }

        report_timer_state_to_ns(rec_exit);

        save_realm_state(rec, rec_exit);
        restore_ns_state(rec);

        /*
         * Clear the FPU/SVE and PMU context pointers on exit.
         */
        ns_state->sve = NULL;
        ns_state->fpu = NULL;
        ns_state->pmu = NULL;

        /*
         * Clear the NS pointer, since the per-CPU ns_state it refers to is
         * only valid while this function is running.
         */
        rec->ns = NULL;

        /* Undo the heap association */
        attestation_heap_ctx_unassign_pe();
        /* Unmap auxiliary granules */
        unmap_rec_aux(rec_aux, rec->num_rec_aux);
}