Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 1 | /* |
| 2 | * SPDX-License-Identifier: BSD-3-Clause |
| 3 | * |
| 4 | * SPDX-FileCopyrightText: Copyright TF-RMM Contributors. |
| 5 | */ |
| 6 | |
| 7 | #include <arch.h> |
| 8 | #include <arch_features.h> |
| 9 | #include <attestation.h> |
| 10 | #include <buffer.h> |
| 11 | #include <cpuid.h> |
| 12 | #include <exit.h> |
AlexeiFedorov | eaec0c4 | 2023-02-01 18:13:32 +0000 | [diff] [blame] | 13 | #include <pmu.h> |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 14 | #include <rec.h> |
| 15 | #include <run.h> |
Arunachalam Ganapathy | f649121 | 2023-02-23 16:04:34 +0000 | [diff] [blame] | 16 | #include <simd.h> |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 17 | #include <smc-rmi.h> |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 18 | #include <timers.h> |
| 19 | |
| 20 | static struct ns_state g_ns_data[MAX_CPUS]; |
AlexeiFedorov | eaec0c4 | 2023-02-01 18:13:32 +0000 | [diff] [blame] | 21 | static struct pmu_state g_pmu_data[MAX_CPUS]; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 22 | |
| 23 | /* |
AlexeiFedorov | f72ab30 | 2023-04-27 16:45:04 +0100 | [diff] [blame] | 24 | * Initialize pointers in @aux_data |
| 25 | * |
| 26 | * Call with parent REC granule's lock held. |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 27 | */ |
AlexeiFedorov | f72ab30 | 2023-04-27 16:45:04 +0100 | [diff] [blame] | 28 | void init_rec_aux_data(struct rec_aux_data *aux_data, void *rec_aux, |
| 29 | unsigned long num_aux) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 30 | { |
AlexeiFedorov | ec35c54 | 2023-04-27 17:52:02 +0100 | [diff] [blame] | 31 | /* |
| 32 | * Ensure we have enough aux granules for use by REC: |
| 33 | * - REC_HEAP_PAGES for MbedTLS heap |
| 34 | * - REC_PMU_PAGES for PMU state |
| 35 | * - REC_SIMD_PAGES for SIMD state |
| 36 | * - REC_ATTEST_PAGES for 'rec_attest_data' structure |
| 37 | */ |
AlexeiFedorov | f72ab30 | 2023-04-27 16:45:04 +0100 | [diff] [blame] | 38 | assert(num_aux >= REC_NUM_PAGES); |
AlexeiFedorov | eaec0c4 | 2023-02-01 18:13:32 +0000 | [diff] [blame] | 39 | |
| 40 | aux_data->attest_heap_buf = (uint8_t *)rec_aux; |
| 41 | aux_data->pmu = (struct pmu_state *)((uint8_t *)rec_aux + |
AlexeiFedorov | ec35c54 | 2023-04-27 17:52:02 +0100 | [diff] [blame] | 42 | REC_HEAP_SIZE); |
Arunachalam Ganapathy | f649121 | 2023-02-23 16:04:34 +0000 | [diff] [blame] | 43 | aux_data->rec_simd.simd = (struct simd_state *)((uint8_t *)rec_aux + |
AlexeiFedorov | ec35c54 | 2023-04-27 17:52:02 +0100 | [diff] [blame] | 44 | REC_HEAP_SIZE + REC_PMU_SIZE); |
| 45 | aux_data->attest_data = (struct rec_attest_data *)((uint8_t *)rec_aux + |
| 46 | REC_HEAP_SIZE + REC_PMU_SIZE + REC_SIMD_SIZE); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 47 | } |
| 48 | |
| 49 | /* |
AlexeiFedorov | f72ab30 | 2023-04-27 16:45:04 +0100 | [diff] [blame] | 50 | * Map REC auxiliary Granules |
| 51 | * |
| 52 | * Call with parent REC granule's lock held. |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 53 | */ |
AlexeiFedorov | f72ab30 | 2023-04-27 16:45:04 +0100 | [diff] [blame] | 54 | void *map_rec_aux(struct granule *rec_aux_pages[], unsigned long num_aux) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 55 | { |
| 56 | void *rec_aux = NULL; |
| 57 | |
| 58 | for (unsigned long i = 0UL; i < num_aux; i++) { |
| 59 | void *aux = granule_map(rec_aux_pages[i], SLOT_REC_AUX0 + i); |
| 60 | |
| 61 | if (i == 0UL) { |
| 62 | rec_aux = aux; |
| 63 | } |
| 64 | } |
| 65 | return rec_aux; |
| 66 | } |
| 67 | |
AlexeiFedorov | f72ab30 | 2023-04-27 16:45:04 +0100 | [diff] [blame] | 68 | /* |
| 69 | * Unmap REC auxiliary Granules |
| 70 | * |
| 71 | * Call with parent REC granule's lock held. |
| 72 | */ |
| 73 | void unmap_rec_aux(void *rec_aux, unsigned long num_aux) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 74 | { |
| 75 | unsigned char *rec_aux_vaddr = (unsigned char *)rec_aux; |
| 76 | |
| 77 | for (unsigned long i = 0UL; i < num_aux; i++) { |
| 78 | buffer_unmap(rec_aux_vaddr + i * GRANULE_SIZE); |
| 79 | } |
| 80 | } |
| 81 | |
/*
 * Save the EL0/EL1 system register context into @sysregs.
 *
 * EL1 registers are read through their _el12 accessor aliases, so the
 * values captured are the (redirected) EL1 context.
 * NOTE(review): this presumes register redirection (HCR_EL2.E2H) is in
 * effect whenever this is called — confirm against the EL2 setup code.
 */
static void save_sysreg_state(struct sysreg_state *sysregs)
{
	sysregs->sp_el0 = read_sp_el0();
	sysregs->sp_el1 = read_sp_el1();
	sysregs->elr_el1 = read_elr_el12();
	sysregs->spsr_el1 = read_spsr_el12();
	sysregs->pmcr_el0 = read_pmcr_el0();
	sysregs->tpidrro_el0 = read_tpidrro_el0();
	sysregs->tpidr_el0 = read_tpidr_el0();
	sysregs->csselr_el1 = read_csselr_el1();
	sysregs->sctlr_el1 = read_sctlr_el12();
	sysregs->actlr_el1 = read_actlr_el1();
	sysregs->cpacr_el1 = read_cpacr_el12();
	sysregs->ttbr0_el1 = read_ttbr0_el12();
	sysregs->ttbr1_el1 = read_ttbr1_el12();
	sysregs->tcr_el1 = read_tcr_el12();
	sysregs->esr_el1 = read_esr_el12();
	sysregs->afsr0_el1 = read_afsr0_el12();
	sysregs->afsr1_el1 = read_afsr1_el12();
	sysregs->far_el1 = read_far_el12();
	sysregs->mair_el1 = read_mair_el12();
	sysregs->vbar_el1 = read_vbar_el12();

	sysregs->contextidr_el1 = read_contextidr_el12();
	sysregs->tpidr_el1 = read_tpidr_el1();
	sysregs->amair_el1 = read_amair_el12();
	sysregs->cntkctl_el1 = read_cntkctl_el12();
	sysregs->par_el1 = read_par_el1();
	sysregs->mdscr_el1 = read_mdscr_el1();
	sysregs->mdccint_el1 = read_mdccint_el1();
	sysregs->disr_el1 = read_disr_el1();
	/* MPAM register save is compiled in only when MPAM is supported */
	MPAM(sysregs->mpam0_el1 = read_mpam0_el1();)

	/* Timer registers */
	sysregs->cntpoff_el2 = read_cntpoff_el2();
	sysregs->cntvoff_el2 = read_cntvoff_el2();
	sysregs->cntp_ctl_el0 = read_cntp_ctl_el02();
	sysregs->cntp_cval_el0 = read_cntp_cval_el02();
	sysregs->cntv_ctl_el0 = read_cntv_ctl_el02();
	sysregs->cntv_cval_el0 = read_cntv_cval_el02();
}
| 123 | |
/*
 * Save the Realm's execution context on exit from the Realm:
 * EL0/EL1 system registers, return PC/PSTATE (from ELR/SPSR_EL2),
 * GIC state and, if enabled for this Realm, the PMU context.
 *
 * PMU Realm state is reflected into @rec_exit before the context is
 * saved, so the NS host sees the state at the point of exit.
 */
static void save_realm_state(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	save_sysreg_state(&rec->sysregs);

	/* The Realm resumes at the address/state it exited with */
	rec->pc = read_elr_el2();
	rec->pstate = read_spsr_el2();

	gic_save_state(&rec->sysregs.gicstate);

	if (rec->realm_info.pmu_enabled) {
		/* Expose PMU Realm state to NS */
		pmu_update_rec_exit(rec_exit);

		/* Save PMU context */
		pmu_save_state(rec->aux_data.pmu,
			       rec->realm_info.pmu_num_ctrs);
	}
}
| 142 | |
/*
 * Restore the EL0/EL1 system register context from @sysregs.
 *
 * Mirror of save_sysreg_state(), with two differences:
 * - VMPIDR_EL2 is restored here although save_sysreg_state() does not
 *   capture it; presumably it is fixed at REC creation — confirm.
 * - The timer CVAL/CTL ordering below is deliberate (see comment).
 */
static void restore_sysreg_state(struct sysreg_state *sysregs)
{
	write_sp_el0(sysregs->sp_el0);
	write_sp_el1(sysregs->sp_el1);
	write_elr_el12(sysregs->elr_el1);
	write_spsr_el12(sysregs->spsr_el1);
	write_pmcr_el0(sysregs->pmcr_el0);
	write_tpidrro_el0(sysregs->tpidrro_el0);
	write_tpidr_el0(sysregs->tpidr_el0);
	write_csselr_el1(sysregs->csselr_el1);
	write_sctlr_el12(sysregs->sctlr_el1);
	write_actlr_el1(sysregs->actlr_el1);
	write_cpacr_el12(sysregs->cpacr_el1);
	write_ttbr0_el12(sysregs->ttbr0_el1);
	write_ttbr1_el12(sysregs->ttbr1_el1);
	write_tcr_el12(sysregs->tcr_el1);
	write_esr_el12(sysregs->esr_el1);
	write_afsr0_el12(sysregs->afsr0_el1);
	write_afsr1_el12(sysregs->afsr1_el1);
	write_far_el12(sysregs->far_el1);
	write_mair_el12(sysregs->mair_el1);
	write_vbar_el12(sysregs->vbar_el1);

	write_contextidr_el12(sysregs->contextidr_el1);
	write_tpidr_el1(sysregs->tpidr_el1);
	write_amair_el12(sysregs->amair_el1);
	write_cntkctl_el12(sysregs->cntkctl_el1);
	write_par_el1(sysregs->par_el1);
	write_mdscr_el1(sysregs->mdscr_el1);
	write_mdccint_el1(sysregs->mdccint_el1);
	write_disr_el1(sysregs->disr_el1);
	/* MPAM register restore is compiled in only when MPAM is supported */
	MPAM(write_mpam0_el1(sysregs->mpam0_el1);)
	write_vmpidr_el2(sysregs->vmpidr_el2);

	/* Timer registers */
	write_cntpoff_el2(sysregs->cntpoff_el2);
	write_cntvoff_el2(sysregs->cntvoff_el2);

	/*
	 * Restore CNTx_CVAL registers before CNTx_CTL to avoid
	 * raising the interrupt signal briefly before lowering
	 * it again due to some expired CVAL left in the timer
	 * register.
	 */
	write_cntp_cval_el02(sysregs->cntp_cval_el0);
	write_cntp_ctl_el02(sysregs->cntp_ctl_el0);
	write_cntv_cval_el02(sysregs->cntv_cval_el0);
	write_cntv_ctl_el02(sysregs->cntv_ctl_el0);
}
| 192 | |
/*
 * Program the stage 2 translation configuration (VTCR_EL2) and the
 * stage 2 translation table base (VTTBR_EL2) for this REC's Realm.
 * Both values come from the REC's shared (per-Realm) sysreg state.
 */
static void configure_realm_stage2(struct rec *rec)
{
	write_vtcr_el2(rec->common_sysregs.vtcr_el2);
	write_vttbr_el2(rec->common_sysregs.vttbr_el2);
}
| 198 | |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 199 | static void restore_realm_state(struct rec *rec) |
| 200 | { |
| 201 | /* |
| 202 | * Restore this early to give time to the timer mask to propagate to |
| 203 | * the GIC. Issue an ISB to ensure the register write is actually |
| 204 | * performed before doing the remaining work. |
| 205 | */ |
| 206 | write_cnthctl_el2(rec->sysregs.cnthctl_el2); |
| 207 | isb(); |
| 208 | |
| 209 | restore_sysreg_state(&rec->sysregs); |
AlexeiFedorov | eaec0c4 | 2023-02-01 18:13:32 +0000 | [diff] [blame] | 210 | |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 211 | write_elr_el2(rec->pc); |
| 212 | write_spsr_el2(rec->pstate); |
| 213 | write_hcr_el2(rec->sysregs.hcr_el2); |
| 214 | |
AlexeiFedorov | eaec0c4 | 2023-02-01 18:13:32 +0000 | [diff] [blame] | 215 | /* Control trapping of accesses to PMU registers */ |
| 216 | write_mdcr_el2(rec->common_sysregs.mdcr_el2); |
| 217 | |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 218 | gic_restore_state(&rec->sysregs.gicstate); |
AlexeiFedorov | eaec0c4 | 2023-02-01 18:13:32 +0000 | [diff] [blame] | 219 | |
| 220 | configure_realm_stage2(rec); |
| 221 | |
| 222 | if (rec->realm_info.pmu_enabled) { |
| 223 | /* Restore PMU context */ |
| 224 | pmu_restore_state(rec->aux_data.pmu, |
AlexeiFedorov | 1800292 | 2023-04-06 10:19:51 +0100 | [diff] [blame] | 225 | rec->realm_info.pmu_num_ctrs); |
AlexeiFedorov | eaec0c4 | 2023-02-01 18:13:32 +0000 | [diff] [blame] | 226 | } |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 227 | } |
| 228 | |
/*
 * Save the NS world's context (system registers, ICC_SRE_EL2 and,
 * if the Realm has PMU enabled, the NS PMU context) into the
 * per-CPU ns_state attached to the REC by rec_run_loop().
 */
static void save_ns_state(struct rec *rec)
{
	struct ns_state *ns_state = rec->ns;

	save_sysreg_state(&ns_state->sysregs);

	/*
	 * CNTHCTL_EL2 is saved/restored separately from the main system
	 * registers, because the Realm configuration is written on every
	 * entry to the Realm, see `check_pending_timers`.
	 */
	ns_state->sysregs.cnthctl_el2 = read_cnthctl_el2();

	ns_state->icc_sre_el2 = read_icc_sre_el2();

	if (rec->realm_info.pmu_enabled) {
		/* Save PMU context */
		pmu_save_state(ns_state->pmu, rec->realm_info.pmu_num_ctrs);
	}
}
| 249 | |
/*
 * Restore the NS world's context saved by save_ns_state().
 * Called on the way out of rec_run_loop(), after the Realm state has
 * been saved.
 */
static void restore_ns_state(struct rec *rec)
{
	struct ns_state *ns_state = rec->ns;

	restore_sysreg_state(&ns_state->sysregs);

	/*
	 * CNTHCTL_EL2 is saved/restored separately from the main system
	 * registers, because the Realm configuration is written on every
	 * entry to the Realm, see `check_pending_timers`.
	 */
	write_cnthctl_el2(ns_state->sysregs.cnthctl_el2);

	write_icc_sre_el2(ns_state->icc_sre_el2);

	if (rec->realm_info.pmu_enabled) {
		/* Restore PMU state */
		pmu_restore_state(ns_state->pmu,
				  rec->realm_info.pmu_num_ctrs);
	}
}
| 271 | |
| 272 | static void activate_events(struct rec *rec) |
| 273 | { |
| 274 | /* |
| 275 | * The only event that may be activated at the Realm is the SError. |
| 276 | */ |
| 277 | if (rec->serror_info.inject) { |
| 278 | write_vsesr_el2(rec->serror_info.vsesr_el2); |
| 279 | write_hcr_el2(rec->sysregs.hcr_el2 | HCR_VSE); |
| 280 | rec->serror_info.inject = false; |
| 281 | } |
| 282 | } |
| 283 | |
/*
 * Mark an SError with syndrome @vsesr as pending for injection into
 * the Realm. The actual injection (VSESR_EL2/HCR_EL2.VSE programming)
 * is deferred to activate_events() on the next Realm entry.
 */
void inject_serror(struct rec *rec, unsigned long vsesr)
{
	rec->serror_info.vsesr_el2 = vsesr;
	rec->serror_info.inject = true;
}
| 289 | |
Arunachalam Ganapathy | f649121 | 2023-02-23 16:04:34 +0000 | [diff] [blame] | 290 | /* Initialize REC simd state once on the first REC enter */ |
| 291 | static void rec_simd_state_init(struct rec *rec) |
| 292 | { |
| 293 | struct rec_simd_state *rec_simd; |
| 294 | simd_t stype; |
| 295 | |
| 296 | rec_simd = &rec->aux_data.rec_simd; |
| 297 | assert(rec_simd->simd != NULL); |
| 298 | |
| 299 | if (rec_simd->init_done == true) { |
| 300 | return; |
| 301 | } |
| 302 | |
| 303 | stype = rec_simd_type(rec); |
| 304 | /* |
| 305 | * As part of lazy save/restore, the first state will be restored from |
| 306 | * the REC's simd_state. So the initial state is considered saved, call |
| 307 | * simd_state_init() to set the simd type. sve_vq will be set if the REC |
| 308 | * 'stype' is SIMD_SVE. |
| 309 | */ |
| 310 | simd_state_init(stype, rec_simd->simd, rec->realm_info.sve_vq); |
| 311 | rec_simd->simd_allowed = false; |
| 312 | rec_simd->init_done = true; |
| 313 | } |
| 314 | |
| 315 | /* Save the REC SIMD state to memory and disable simd access for the REC */ |
Arunachalam Ganapathy | 5111993 | 2023-03-23 12:32:49 +0000 | [diff] [blame] | 316 | void rec_simd_save_disable(struct rec *rec) |
Arunachalam Ganapathy | f649121 | 2023-02-23 16:04:34 +0000 | [diff] [blame] | 317 | { |
| 318 | struct rec_simd_state *rec_simd; |
| 319 | simd_t stype; |
| 320 | |
| 321 | rec_simd = &rec->aux_data.rec_simd; |
| 322 | |
| 323 | assert(rec_simd->simd != NULL); |
| 324 | assert(rec_simd->simd_allowed == true); |
| 325 | |
| 326 | stype = rec_simd_type(rec); |
| 327 | |
| 328 | /* |
| 329 | * As the REC has used the SIMD, no need to disable traps as it must be |
| 330 | * already disabled as part of last restore. |
| 331 | */ |
| 332 | rec_simd->simd_allowed = false; |
| 333 | simd_save_state(stype, rec_simd->simd); |
| 334 | simd_disable(); |
| 335 | } |
| 336 | |
| 337 | /* Restore the REC SIMD state from memory and enable simd access for the REC */ |
| 338 | void rec_simd_enable_restore(struct rec *rec) |
| 339 | { |
| 340 | struct rec_simd_state *rec_simd; |
| 341 | simd_t stype; |
| 342 | |
| 343 | assert(rec != NULL); |
| 344 | rec_simd = &rec->aux_data.rec_simd; |
| 345 | |
| 346 | assert(rec_simd->simd != NULL); |
| 347 | assert(rec_simd->simd_allowed == false); |
| 348 | |
| 349 | stype = rec_simd_type(rec); |
| 350 | simd_enable(stype); |
| 351 | simd_restore_state(stype, rec_simd->simd); |
| 352 | rec_simd->simd_allowed = true; |
| 353 | /* return with traps disabled to allow REC to use FPU and/or SVE */ |
| 354 | } |
| 355 | |
/*
 * Enter the Realm through this REC and run it until an exit must be
 * reported to the NS host.
 *
 * Sequence: attach per-CPU NS state to the REC, map the REC's aux
 * granules, set up the attestation heap and SIMD state, save the NS
 * world context, restore the Realm context, then loop entering the
 * Realm until handle_realm_exit() decides the exit must be forwarded
 * to NS (or a pending timer forces an RMI_EXIT_IRQ). On the way out,
 * the Realm context is saved, NS context restored, and all per-entry
 * associations (PMU pointer, ns pointer, heap, aux mappings) undone.
 *
 * @rec_exit is the shared exit structure filled in for the NS caller.
 */
void rec_run_loop(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	struct ns_state *ns_state;
	int realm_exception_code;
	void *rec_aux;
	unsigned int cpuid = my_cpuid();
	struct rec_attest_data *attest_data;

	assert(cpuid < MAX_CPUS);
	/* The REC must not be attached to another run in progress */
	assert(rec->ns == NULL);

	ns_state = &g_ns_data[cpuid];

	/* Ensure PMU context is cleared */
	assert(ns_state->pmu == NULL);

	rec->ns = ns_state;

	/* Map auxiliary granules */
	rec_aux = map_rec_aux(rec->g_aux, rec->num_rec_aux);

	/*
	 * The attestation heap on the REC aux pages is mapped now. It is
	 * time to associate it with the current CPU.
	 * This heap will be used for attestation RSI calls when the
	 * REC is running.
	 */
	attest_data = rec->aux_data.attest_data;
	attestation_heap_ctx_assign_pe(&attest_data->alloc_info.ctx);

	/*
	 * Initialise the heap for attestation if necessary.
	 */
	if (!attest_data->alloc_info.ctx_initialised) {
		(void)attestation_heap_ctx_init(rec->aux_data.attest_heap_buf,
						REC_HEAP_SIZE);
		attest_data->alloc_info.ctx_initialised = true;
	}

	/* One-time SIMD state setup on the first entry to this REC */
	rec_simd_state_init(rec);

	/* NS PMU context is only attached while the Realm is running */
	ns_state->pmu = &g_pmu_data[cpuid];

	save_ns_state(rec);
	restore_realm_state(rec);

	/* The REC must enter run loop with SIMD access disabled */
	assert(rec_is_simd_allowed(rec) == false);

	do {
		/*
		 * We must check the status of the arch timers in every
		 * iteration of the loop to ensure we update the timer
		 * mask on each entry to the realm and that we report any
		 * change in output level to the NS caller.
		 */
		if (check_pending_timers(rec)) {
			rec_exit->exit_reason = RMI_EXIT_IRQ;
			break;
		}

		activate_events(rec);

		/*
		 * Restore Realm PAuth Key.
		 * There shouldn't be any other function call which uses PAuth
		 * till the RMM keys are restored.
		 */
		pauth_restore_realm_keys(&rec->pauth);

		realm_exception_code = run_realm(&rec->regs[0]);

		/* Save Realm PAuth key. */
		pauth_save_realm_keys(&rec->pauth);

		/* Restore RMM PAuth key. */
		pauth_restore_rmm_keys();
	} while (handle_realm_exit(rec, rec_exit, realm_exception_code));

	/*
	 * Check if FPU/SIMD was used, and if it was, save the realm state,
	 * restore the NS state, and reenable traps in CPTR_EL2.
	 */
	if (rec_is_simd_allowed(rec)) {
		/* Save REC SIMD state to memory and disable SIMD for REC */
		rec_simd_save_disable(rec);

		/* Restore NS state based on system support for SVE or FPU */
		simd_restore_ns_state();
	}

	report_timer_state_to_ns(rec_exit);

	save_realm_state(rec, rec_exit);
	restore_ns_state(rec);

	/*
	 * Clear PMU context while exiting
	 */
	ns_state->pmu = NULL;

	/*
	 * Clear NS pointer since that struct is local to this function.
	 */
	rec->ns = NULL;

	/* Undo the heap association */
	attestation_heap_ctx_unassign_pe();

	/* Unmap auxiliary granules */
	unmap_rec_aux(rec_aux, rec->num_rec_aux);
}