/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_features.h>
#include <attestation.h>
#include <buffer.h>
#include <cpuid.h>
#include <exit.h>
#include <pmu.h>
#include <rec.h>
#include <run.h>
#include <simd.h>
#include <smc-rmi.h>
#include <timers.h>

static struct ns_state g_ns_data[MAX_CPUS];
static struct pmu_state g_pmu_data[MAX_CPUS];

/*
 * Initialize pointers in @aux_data
 *
 * Call with parent REC granule's lock held.
 */
void init_rec_aux_data(struct rec_aux_data *aux_data, void *rec_aux,
                       unsigned long num_aux)
{
        /* Ensure we have enough aux granules for use by REC */
        assert(num_aux >= REC_NUM_PAGES);

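        /*
         * Carve up the mapped aux region in the order implied by the
         * offsets used below: the attestation heap (REC_HEAP_SIZE bytes)
         * comes first, followed by the PMU state (REC_PMU_SIZE bytes) and
         * then the SIMD state.
         */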
        aux_data->attest_heap_buf = (uint8_t *)rec_aux;

        aux_data->pmu = (struct pmu_state *)((uint8_t *)rec_aux +
                                              REC_HEAP_SIZE);

        aux_data->rec_simd.simd = (struct simd_state *)((uint8_t *)rec_aux +
                                                         REC_HEAP_SIZE +
                                                         REC_PMU_SIZE);
}

/*
 * Map REC auxiliary Granules
 *
 * Call with parent REC granule's lock held.
 */
void *map_rec_aux(struct granule *rec_aux_pages[], unsigned long num_aux)
{
        void *rec_aux = NULL;

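        /*
         * Aux granules are mapped into consecutive SLOT_REC_AUX slots,
         * which are expected to sit at consecutive virtual addresses.
         * Returning the first mapping is therefore enough;
         * unmap_rec_aux() walks the same window in GRANULE_SIZE steps.
         */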
        for (unsigned long i = 0UL; i < num_aux; i++) {
                void *aux = granule_map(rec_aux_pages[i], SLOT_REC_AUX0 + i);

                if (i == 0UL) {
                        rec_aux = aux;
                }
        }
        return rec_aux;
}

/*
 * Unmap REC auxiliary Granules
 *
 * Call with parent REC granule's lock held.
 */
void unmap_rec_aux(void *rec_aux, unsigned long num_aux)
{
        unsigned char *rec_aux_vaddr = (unsigned char *)rec_aux;

        for (unsigned long i = 0UL; i < num_aux; i++) {
                buffer_unmap(rec_aux_vaddr + i * GRANULE_SIZE);
        }
}

static void save_sysreg_state(struct sysreg_state *sysregs)
{
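        /*
         * Note: the *_el12 accessors reach the EL1 copies of these registers
         * from EL2 when HCR_EL2.E2H is set (FEAT_VHE), which this code
         * relies on; plain *_el1 accessors are used for registers that have
         * no EL12 alias and are not redirected.
         */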
        sysregs->sp_el0 = read_sp_el0();
        sysregs->sp_el1 = read_sp_el1();
        sysregs->elr_el1 = read_elr_el12();
        sysregs->spsr_el1 = read_spsr_el12();
        sysregs->pmcr_el0 = read_pmcr_el0();
        sysregs->tpidrro_el0 = read_tpidrro_el0();
        sysregs->tpidr_el0 = read_tpidr_el0();
        sysregs->csselr_el1 = read_csselr_el1();
        sysregs->sctlr_el1 = read_sctlr_el12();
        sysregs->actlr_el1 = read_actlr_el1();
        sysregs->cpacr_el1 = read_cpacr_el12();
        sysregs->ttbr0_el1 = read_ttbr0_el12();
        sysregs->ttbr1_el1 = read_ttbr1_el12();
        sysregs->tcr_el1 = read_tcr_el12();
        sysregs->esr_el1 = read_esr_el12();
        sysregs->afsr0_el1 = read_afsr0_el12();
        sysregs->afsr1_el1 = read_afsr1_el12();
        sysregs->far_el1 = read_far_el12();
        sysregs->mair_el1 = read_mair_el12();
        sysregs->vbar_el1 = read_vbar_el12();

        sysregs->contextidr_el1 = read_contextidr_el12();
        sysregs->tpidr_el1 = read_tpidr_el1();
        sysregs->amair_el1 = read_amair_el12();
        sysregs->cntkctl_el1 = read_cntkctl_el12();
        sysregs->par_el1 = read_par_el1();
        sysregs->mdscr_el1 = read_mdscr_el1();
        sysregs->mdccint_el1 = read_mdccint_el1();
        sysregs->disr_el1 = read_disr_el1();
        MPAM(sysregs->mpam0_el1 = read_mpam0_el1();)

        /* Timer registers */
        sysregs->cntpoff_el2 = read_cntpoff_el2();
        sysregs->cntvoff_el2 = read_cntvoff_el2();
        sysregs->cntp_ctl_el0 = read_cntp_ctl_el02();
        sysregs->cntp_cval_el0 = read_cntp_cval_el02();
        sysregs->cntv_ctl_el0 = read_cntv_ctl_el02();
        sysregs->cntv_cval_el0 = read_cntv_cval_el02();
}

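/*
 * Save the Realm's EL1/EL0 system registers, PC, PSTATE and GIC state.
 * If the PMU is enabled for this Realm, the Realm PMU state is also exposed
 * to the NS host through @rec_exit and the PMU context is saved to the REC
 * aux pages.
 */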
static void save_realm_state(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        save_sysreg_state(&rec->sysregs);

        rec->pc = read_elr_el2();
        rec->pstate = read_spsr_el2();

        gic_save_state(&rec->sysregs.gicstate);

        if (rec->realm_info.pmu_enabled) {
                /* Expose PMU Realm state to NS */
                pmu_update_rec_exit(rec_exit);

                /* Save PMU context */
                pmu_save_state(rec->aux_data.pmu,
                               rec->realm_info.pmu_num_ctrs);
        }
}

static void restore_sysreg_state(struct sysreg_state *sysregs)
{
        write_sp_el0(sysregs->sp_el0);
        write_sp_el1(sysregs->sp_el1);
        write_elr_el12(sysregs->elr_el1);
        write_spsr_el12(sysregs->spsr_el1);
        write_pmcr_el0(sysregs->pmcr_el0);
        write_tpidrro_el0(sysregs->tpidrro_el0);
        write_tpidr_el0(sysregs->tpidr_el0);
        write_csselr_el1(sysregs->csselr_el1);
        write_sctlr_el12(sysregs->sctlr_el1);
        write_actlr_el1(sysregs->actlr_el1);
        write_cpacr_el12(sysregs->cpacr_el1);
        write_ttbr0_el12(sysregs->ttbr0_el1);
        write_ttbr1_el12(sysregs->ttbr1_el1);
        write_tcr_el12(sysregs->tcr_el1);
        write_esr_el12(sysregs->esr_el1);
        write_afsr0_el12(sysregs->afsr0_el1);
        write_afsr1_el12(sysregs->afsr1_el1);
        write_far_el12(sysregs->far_el1);
        write_mair_el12(sysregs->mair_el1);
        write_vbar_el12(sysregs->vbar_el1);

        write_contextidr_el12(sysregs->contextidr_el1);
        write_tpidr_el1(sysregs->tpidr_el1);
        write_amair_el12(sysregs->amair_el1);
        write_cntkctl_el12(sysregs->cntkctl_el1);
        write_par_el1(sysregs->par_el1);
        write_mdscr_el1(sysregs->mdscr_el1);
        write_mdccint_el1(sysregs->mdccint_el1);
        write_disr_el1(sysregs->disr_el1);
        MPAM(write_mpam0_el1(sysregs->mpam0_el1);)
        write_vmpidr_el2(sysregs->vmpidr_el2);

        /* Timer registers */
        write_cntpoff_el2(sysregs->cntpoff_el2);
        write_cntvoff_el2(sysregs->cntvoff_el2);

        /*
         * Restore CNTx_CVAL registers before CNTx_CTL to avoid
         * raising the interrupt signal briefly before lowering
         * it again due to some expired CVAL left in the timer
         * register.
         */
        write_cntp_cval_el02(sysregs->cntp_cval_el0);
        write_cntp_ctl_el02(sysregs->cntp_ctl_el0);
        write_cntv_cval_el02(sysregs->cntv_cval_el0);
        write_cntv_ctl_el02(sysregs->cntv_ctl_el0);
}

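/*
 * Install the Realm's stage 2 translation controls (VTCR_EL2 and VTTBR_EL2)
 * before entering the Realm. These values are held in the REC's
 * common_sysregs, which are shared by the RECs of a Realm.
 */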
static void configure_realm_stage2(struct rec *rec)
{
        write_vtcr_el2(rec->common_sysregs.vtcr_el2);
        write_vttbr_el2(rec->common_sysregs.vttbr_el2);
}

static void restore_realm_state(struct rec *rec)
{
        /*
         * Restore this early to give time to the timer mask to propagate to
         * the GIC. Issue an ISB to ensure the register write is actually
         * performed before doing the remaining work.
         */
        write_cnthctl_el2(rec->sysregs.cnthctl_el2);
        isb();

        restore_sysreg_state(&rec->sysregs);

        write_elr_el2(rec->pc);
        write_spsr_el2(rec->pstate);
        write_hcr_el2(rec->sysregs.hcr_el2);

        /* Control trapping of accesses to PMU registers */
        write_mdcr_el2(rec->common_sysregs.mdcr_el2);

        gic_restore_state(&rec->sysregs.gicstate);

        configure_realm_stage2(rec);

        if (rec->realm_info.pmu_enabled) {
                /* Restore PMU context */
                pmu_restore_state(rec->aux_data.pmu,
                                  rec->realm_info.pmu_num_ctrs);
        }
}

static void save_ns_state(struct rec *rec)
{
        struct ns_state *ns_state = rec->ns;

        save_sysreg_state(&ns_state->sysregs);

        /*
         * CNTHCTL_EL2 is saved/restored separately from the main system
         * registers, because the Realm configuration is written on every
         * entry to the Realm, see `check_pending_timers`.
         */
        ns_state->sysregs.cnthctl_el2 = read_cnthctl_el2();

        ns_state->icc_sre_el2 = read_icc_sre_el2();

        if (rec->realm_info.pmu_enabled) {
                /* Save PMU context */
                pmu_save_state(ns_state->pmu, rec->realm_info.pmu_num_ctrs);
        }
}

static void restore_ns_state(struct rec *rec)
{
        struct ns_state *ns_state = rec->ns;

        restore_sysreg_state(&ns_state->sysregs);

        /*
         * CNTHCTL_EL2 is saved/restored separately from the main system
         * registers, because the Realm configuration is written on every
         * entry to the Realm, see `check_pending_timers`.
         */
        write_cnthctl_el2(ns_state->sysregs.cnthctl_el2);

        write_icc_sre_el2(ns_state->icc_sre_el2);

        if (rec->realm_info.pmu_enabled) {
                /* Restore PMU state */
                pmu_restore_state(ns_state->pmu,
                                  rec->realm_info.pmu_num_ctrs);
        }
}

static void activate_events(struct rec *rec)
{
        /*
         * The only event that may be activated for the Realm is an SError.
         */
        if (rec->serror_info.inject) {
                write_vsesr_el2(rec->serror_info.vsesr_el2);
                write_hcr_el2(rec->sysregs.hcr_el2 | HCR_VSE);
                rec->serror_info.inject = false;
        }
}

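/*
 * Record a virtual SError to be injected into the Realm on the next REC
 * entry, see activate_events().
 */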
void inject_serror(struct rec *rec, unsigned long vsesr)
{
        rec->serror_info.vsesr_el2 = vsesr;
        rec->serror_info.inject = true;
}

/* Initialize the REC SIMD state once, on the first REC entry */
static void rec_simd_state_init(struct rec *rec)
{
        struct rec_simd_state *rec_simd;
        simd_t stype;

        rec_simd = &rec->aux_data.rec_simd;
        assert(rec_simd->simd != NULL);

        if (rec_simd->init_done == true) {
                return;
        }

        stype = rec_simd_type(rec);
        /*
         * As part of lazy save/restore, the first SIMD state the REC sees is
         * restored from the REC's simd_state, so the initial state is
         * considered already saved. Call simd_state_init() to set the SIMD
         * type; sve_vq is only set when the REC's 'stype' is SIMD_SVE.
         */
        simd_state_init(stype, rec_simd->simd, rec->realm_info.sve_vq);
        rec_simd->simd_allowed = false;
        rec_simd->init_done = true;
}

/* Save the REC SIMD state to memory and disable SIMD access for the REC */
void rec_simd_save_disable(struct rec *rec)
{
        struct rec_simd_state *rec_simd;
        simd_t stype;

        rec_simd = &rec->aux_data.rec_simd;

        assert(rec_simd->simd != NULL);
        assert(rec_simd->simd_allowed == true);

        stype = rec_simd_type(rec);

        /*
         * The REC has used SIMD, so traps are already disabled (they were
         * switched off by the last rec_simd_enable_restore()); there is no
         * need to disable them again before saving the state.
         */
        rec_simd->simd_allowed = false;
        simd_save_state(stype, rec_simd->simd);
        simd_disable();
}

/* Restore the REC SIMD state from memory and enable SIMD access for the REC */
void rec_simd_enable_restore(struct rec *rec)
{
        struct rec_simd_state *rec_simd;
        simd_t stype;

        assert(rec != NULL);
        rec_simd = &rec->aux_data.rec_simd;

        assert(rec_simd->simd != NULL);
        assert(rec_simd->simd_allowed == false);

        stype = rec_simd_type(rec);
        simd_enable(stype);
        simd_restore_state(stype, rec_simd->simd);
        rec_simd->simd_allowed = true;
        /* Return with traps disabled to allow the REC to use FPU and/or SVE */
}

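/*
 * Run a REC until an exit that must be reported to the NS host occurs.
 *
 * Outline of the sequence below: map the REC aux granules, save the NS
 * context and restore the Realm context, then repeatedly enter the Realm
 * via run_realm() until handle_realm_exit() indicates that the exit has to
 * be reported back through @rec_exit. On the way out, the Realm context is
 * saved, the NS context is restored and the aux granules are unmapped.
 */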
void rec_run_loop(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        struct ns_state *ns_state;
        int realm_exception_code;
        void *rec_aux;
        unsigned int cpuid = my_cpuid();

        assert(cpuid < MAX_CPUS);
        assert(rec->ns == NULL);

        ns_state = &g_ns_data[cpuid];

        /* Ensure PMU context is cleared */
        assert(ns_state->pmu == NULL);

        rec->ns = ns_state;

        /* Map auxiliary granules */
        rec_aux = map_rec_aux(rec->g_aux, rec->num_rec_aux);

        init_rec_aux_data(&(rec->aux_data), rec_aux, rec->num_rec_aux);

        /*
         * The attestation heap on the REC aux pages is mapped now. It is
         * time to associate it with the current CPU.
         * This heap will be used for attestation RSI calls when the
         * REC is running.
         */
        attestation_heap_ctx_assign_pe(&rec->alloc_info.ctx);

        /*
         * Initialise the heap for attestation if necessary.
         */
        if (!rec->alloc_info.ctx_initialised) {
                (void)attestation_heap_ctx_init(rec->aux_data.attest_heap_buf,
                                                REC_HEAP_SIZE);
                rec->alloc_info.ctx_initialised = true;
        }

        rec_simd_state_init(rec);

        ns_state->pmu = &g_pmu_data[cpuid];

        save_ns_state(rec);
        restore_realm_state(rec);

        /* The REC must enter the run loop with SIMD access disabled */
        assert(rec_is_simd_allowed(rec) == false);

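        /*
         * Keep re-entering the Realm until an exit that must be handed back
         * to the NS host is encountered: run_realm() switches to the Realm
         * context and returns the exception code of the next Realm exit,
         * and the loop continues for as long as handle_realm_exit() asks
         * for the Realm to be re-entered.
         */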
        do {
                /*
                 * We must check the status of the arch timers in every
                 * iteration of the loop to ensure we update the timer
                 * mask on each entry to the realm and that we report any
                 * change in output level to the NS caller.
                 */
                if (check_pending_timers(rec)) {
                        rec_exit->exit_reason = RMI_EXIT_IRQ;
                        break;
                }

                activate_events(rec);

                /*
                 * Restore Realm PAuth key.
                 * There must not be any other function call which uses PAuth
                 * until the RMM keys are restored.
                 */
                pauth_restore_realm_keys(&rec->pauth);

                realm_exception_code = run_realm(&rec->regs[0]);

                /* Save Realm PAuth key. */
                pauth_save_realm_keys(&rec->pauth);

                /* Restore RMM PAuth key. */
                pauth_restore_rmm_keys();
        } while (handle_realm_exit(rec, rec_exit, realm_exception_code));

        /*
         * Check if FPU/SIMD was used and, if it was, save the Realm SIMD
         * state, restore the NS SIMD state and re-enable traps in CPTR_EL2.
         */
        if (rec_is_simd_allowed(rec)) {
                /* Save REC SIMD state to memory and disable SIMD for REC */
                rec_simd_save_disable(rec);

                /* Restore NS state based on system support for SVE or FPU */
                simd_restore_ns_state();
        }

        report_timer_state_to_ns(rec_exit);

        save_realm_state(rec, rec_exit);
        restore_ns_state(rec);

        /*
         * Clear PMU context while exiting
         */
        ns_state->pmu = NULL;

        /*
         * Clear the NS pointer: the per-CPU ns_state is only valid while
         * this REC is running on this CPU.
         */
        rec->ns = NULL;

        /* Undo the heap association */
        attestation_heap_ctx_unassign_pe();
        /* Unmap auxiliary granules */
        unmap_rec_aux(rec_aux, rec->num_rec_aux);
}