/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <buffer.h>
#include <debug.h>
#include <esr.h>
#include <exit.h>
#include <gic.h>
#include <granule.h>
#include <inject_exp.h>
#include <memory_alloc.h>
#include <psci.h>
#include <realm.h>
#include <realm_attest.h>
#include <rec.h>
#include <rsi-config.h>
#include <rsi-handler.h>
#include <rsi-host-call.h>
#include <rsi-logger.h>
#include <rsi-memory.h>
#include <rsi-walk.h>
#include <run.h>
#include <simd.h>
#include <smc-rmi.h>
#include <smc-rsi.h>
#include <status.h>
#include <sysreg_traps.h>
#include <table.h>

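/*
 * FPU save/restore helpers for the EL2 FPU register state;
 * defined outside this translation unit.
 */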
void save_fpu_state(struct fpu_state *fpu);
void restore_fpu_state(struct fpu_state *fpu);

static void system_abort(void)
{
	/*
	 * TODO: report the abort to the EL3.
	 * We need to establish the exact EL3 API first.
	 */
	assert(false);
}

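/*
 * Returns 'true' if the data abort was taken from an AArch32 Realm, in
 * which case the ISV bit in @esr is cleared so that the access is not
 * reported to the host as an emulatable MMIO access.
 */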
static bool fixup_aarch32_data_abort(struct rec *rec, unsigned long *esr)
{
	unsigned long spsr = read_spsr_el2();

	if ((spsr & SPSR_EL2_nRW_AARCH32) != 0UL) {
		/*
		 * MMIO emulation of AArch32 reads/writes is not supported.
		 */
		*esr &= ~ESR_EL2_ABORT_ISV_BIT;
		return true;
	}
	return false;
}

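/*
 * Returns the value that the Realm attempted to write, read from the REC
 * GPR selected by the Syndrome Register Transfer (SRT) field of @esr and
 * masked to the access size.
 */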
static unsigned long get_dabt_write_value(struct rec *rec, unsigned long esr)
{
	unsigned int rt = esr_srt(esr);

	/* Handle xzr */
	if (rt == 31U) {
		return 0UL;
	}
	return rec->regs[rt] & access_mask(esr);
}

/*
 * Returns 'true' if access from @rec to @addr is within the Protected IPA
 * space.
 */
static bool access_in_rec_par(struct rec *rec, unsigned long addr)
{
	/*
	 * It is OK to check only the base address of the access because:
	 * - The Protected IPA space starts at address zero.
	 * - The IPA width is below 64 bits, therefore the access cannot
	 *   wrap around.
	 */
	return addr_in_rec_par(rec, addr);
}

/*
 * Returns 'true' if @ipa is in PAR and its RIPAS is 'empty'.
 *
 * @ipa must be aligned to the granule size.
 */
static bool ipa_is_empty(unsigned long ipa, struct rec *rec)
{
	unsigned long s2tte, *ll_table;
	struct rtt_walk wi;
	bool ret;

	assert(GRANULE_ALIGNED(ipa));

	if (!addr_in_rec_par(rec, ipa)) {
		return false;
	}
	granule_lock(rec->realm_info.g_rtt, GRANULE_STATE_RTT);

	rtt_walk_lock_unlock(rec->realm_info.g_rtt,
			     rec->realm_info.s2_starting_level,
			     rec->realm_info.ipa_bits,
			     ipa, RTT_PAGE_LEVEL, &wi);

	ll_table = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&ll_table[wi.index]);

	ret = s2tte_is_unassigned_empty(s2tte) ||
	      s2tte_is_assigned_empty(s2tte, wi.last_level);

	buffer_unmap(ll_table);
	granule_unlock(wi.g_llt);
	return ret;
}

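/*
 * Returns 'true' if @fsc is a Synchronous External Abort fault status code,
 * either on the access itself or on a stage of the translation table walk.
 */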
static bool fsc_is_external_abort(unsigned long fsc)
{
	if (fsc == ESR_EL2_ABORT_FSC_SEA) {
		return true;
	}

	if ((fsc >= ESR_EL2_ABORT_FSC_SEA_TTW_START) &&
	    (fsc <= ESR_EL2_ABORT_FSC_SEA_TTW_END)) {
		return true;
	}

	return false;
}

/*
 * Handles Data/Instruction Aborts at a lower EL with External Abort fault
 * status code (D/IFSC).
 * Returns 'true' if the exception is an external abort and the `rec_exit`
 * structure is populated, 'false' otherwise.
 */
static bool handle_sync_external_abort(struct rec *rec,
				       struct rmi_rec_exit *rec_exit,
				       unsigned long esr)
{
	unsigned long fsc = esr & MASK(ESR_EL2_ABORT_FSC);
	unsigned long set = esr & MASK(ESR_EL2_ABORT_SET);

	if (!fsc_is_external_abort(fsc)) {
		return false;
	}

	switch (set) {
	case ESR_EL2_ABORT_SET_UER:
		/*
		 * The recoverable SEA.
		 * Inject the synchronous abort into the Realm.
		 * Report the exception to the host.
		 */
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		/*
		 * Fall through.
		 */
	case ESR_EL2_ABORT_SET_UEO:
		/*
		 * The restartable SEA.
		 * Report the exception to the host.
		 * The REC restarts the same instruction.
		 */
		rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

		/*
		 * The value of the HPFAR_EL2 is not provided to the host as
		 * it is undefined for external aborts.
		 *
		 * We also don't provide the content of FAR_EL2 because it
		 * has no practical value to the host without the HPFAR_EL2.
		 */
		break;
	case ESR_EL2_ABORT_SET_UC:
		/*
		 * The uncontainable SEA.
		 * Fatal to the system.
		 */
		system_abort();
		break;
	default:
		assert(false);
	}

	return true;
}

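/*
 * Set up a synthetic Stage 2 Translation Fault at @rtt_level for the
 * faulting IPA held in rec->regs[1] and report it to the NS host via
 * @rec_exit.
 */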
void emulate_stage2_data_abort(struct rec *rec,
			       struct rmi_rec_exit *rec_exit,
			       unsigned long rtt_level)
{
	unsigned long fipa = rec->regs[1];

	assert(rtt_level <= RTT_PAGE_LEVEL);

	/*
	 * Set up the Exception Syndrome Register to emulate a real data
	 * abort and return to the NS host to handle it.
	 */
	rec_exit->esr = (ESR_EL2_EC_DATA_ABORT |
			(ESR_EL2_ABORT_FSC_TRANSLATION_FAULT_L0 + rtt_level));
	rec_exit->far = 0UL;
	rec_exit->hpfar = fipa >> HPFAR_EL2_FIPA_OFFSET;
	rec_exit->exit_reason = RMI_EXIT_SYNC;
}

/*
 * Returns 'true' if the abort is handled and the RMM should return to the
 * Realm, and returns 'false' if the exception should be reported to the
 * NS host.
 */
static bool handle_data_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
			      unsigned long esr)
{
	unsigned long far = 0UL;
	unsigned long hpfar = read_hpfar_el2();
	unsigned long fipa = (hpfar & MASK(HPFAR_EL2_FIPA)) << HPFAR_EL2_FIPA_OFFSET;
	unsigned long write_val = 0UL;

	if (handle_sync_external_abort(rec, rec_exit, esr)) {
		/*
		 * All external aborts are immediately reported to the host.
		 */
		return false;
	}

	/*
	 * A memory access that crosses a page boundary may cause two aborts,
	 * with `hpfar_el2` values referring to two consecutive pages.
	 *
	 * Insert the SEA and return to the Realm if the granule's RIPAS is
	 * EMPTY.
	 */
	if (ipa_is_empty(fipa, rec)) {
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		return true;
	}

	if (fixup_aarch32_data_abort(rec, &esr) ||
	    access_in_rec_par(rec, fipa)) {
		esr &= ESR_NONEMULATED_ABORT_MASK;
		goto end;
	}

	if (esr_is_write(esr)) {
		write_val = get_dabt_write_value(rec, esr);
	}

	far = read_far_el2() & ~GRANULE_MASK;
	esr &= ESR_EMULATED_ABORT_MASK;

end:
	rec_exit->esr = esr;
	rec_exit->far = far;
	rec_exit->hpfar = hpfar;
	rec_exit->gprs[0] = write_val;

	return false;
}

/*
 * Returns 'true' if the abort is handled and the RMM should return to the
 * Realm, and returns 'false' if the exception should be reported to the
 * NS host.
 */
static bool handle_instruction_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
				     unsigned long esr)
{
	unsigned long fsc = esr & MASK(ESR_EL2_ABORT_FSC);
	unsigned long fsc_type = fsc & ~MASK(ESR_EL2_ABORT_FSC_LEVEL);
	unsigned long hpfar = read_hpfar_el2();
	unsigned long fipa = (hpfar & MASK(HPFAR_EL2_FIPA)) << HPFAR_EL2_FIPA_OFFSET;

	if (handle_sync_external_abort(rec, rec_exit, esr)) {
		/*
		 * All external aborts are immediately reported to the host.
		 */
		return false;
	}

	/*
	 * Insert the SEA and return to the Realm if:
	 * - The instruction abort is at an Unprotected IPA, or
	 * - The granule's RIPAS is EMPTY.
	 */
	if (!access_in_rec_par(rec, fipa) || ipa_is_empty(fipa, rec)) {
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		return true;
	}

	if (fsc_type != ESR_EL2_ABORT_FSC_TRANSLATION_FAULT) {
		unsigned long far = read_far_el2();

		/*
		 * TODO: Should this ever happen, or is it an indication of an
		 * internal consistency failure in the RMM which should lead
		 * to a panic instead?
		 */

		ERROR("Unhandled instruction abort:\n");
		ERROR("    FSC: %12s0x%02lx\n", " ", fsc);
		ERROR("    FAR: %16lx\n", far);
		ERROR("  HPFAR: %16lx\n", hpfar);
		return false;
	}

	rec_exit->hpfar = hpfar;
	rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

	return false;
}

/*
 * Handle FPU or SVE exceptions.
 * Returns: true if the exception is handled.
 */
static bool handle_simd_exception(simd_t exp_type, struct rec *rec)
{
	/*
	 * If the REC wants to use SVE but SVE is not enabled for this REC,
	 * inject an undefined abort. This can happen when the CPU implements
	 * FEAT_SVE but the Realm didn't request this feature during creation.
	 */
	if (exp_type == SIMD_SVE && rec_simd_type(rec) != SIMD_SVE) {
		realm_inject_undef_abort();
		return true;
	}

	/* An FPU or SVE exception can happen only when the REC hasn't used SIMD */
	assert(rec_is_simd_allowed(rec) == false);

	/*
	 * Allow the REC to use SIMD. Save the NS SIMD state and restore the
	 * REC SIMD state from memory to registers.
	 */
	simd_save_ns_state();
	rec_simd_enable_restore(rec);

	/*
	 * Return 'true' indicating that this exception has been handled and
	 * execution can continue.
	 */
	return true;
}

/*
 * Return 'false' if no interrupt is pending,
 * return 'true' if an interrupt is pending and we need to return to the host.
 * ISR_EL1 indicates any pending physical IRQ, FIQ or SError.
 */
static bool check_pending_irq(void)
{
	unsigned long pending_irq;

	pending_irq = read_isr_el1();

	return (pending_irq != 0UL);
}

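/*
 * Advance ELR_EL2 by one (4-byte) instruction so that the Realm resumes
 * execution after the trapped instruction.
 */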
static void advance_pc(void)
{
	unsigned long pc = read_elr_el2();

	write_elr_el2(pc + 4UL);
}

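/* Copy an SMC result into the Realm's x0-x3 return registers. */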
static void return_result_to_realm(struct rec *rec, struct smc_result result)
{
	rec->regs[0] = result.x[0];
	rec->regs[1] = result.x[1];
	rec->regs[2] = result.x[2];
	rec->regs[3] = result.x[3];
}

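/*
 * Returns 'true' if the RSI handler for @id uses the FPU at REL2 and thus
 * requires the live SIMD state to be saved first. This only applies when
 * the RMM is built with RMM_FPU_USE_AT_REL2.
 */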
static inline bool rsi_handler_needs_fpu(unsigned int id)
{
#ifdef RMM_FPU_USE_AT_REL2
	if (id == SMC_RSI_ATTEST_TOKEN_CONTINUE ||
	    id == SMC_RSI_MEASUREMENT_EXTEND) {
		return true;
	}
#endif
	return false;
}

/*
 * Return 'true' if execution should continue in the REC, otherwise return
 * 'false' to go back to the NS caller of REC.Enter.
 */
static bool handle_realm_rsi(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	bool ret_to_rec = true;	/* Return to Realm */
	unsigned int function_id = (unsigned int)rec->regs[0];
	bool restore_rec_simd_state = false;

	RSI_LOG_SET(rec->regs);

	/* Ignore the SVE hint bit until the RMM supports it */
	function_id &= ~MASK(SMC_SVE_HINT);

	if (rsi_handler_needs_fpu(function_id) == true) {
		/*
		 * The RSI handler uses the FPU at REL2, so actively save the
		 * REC SIMD state if the REC is using SIMD, or the NS SIMD
		 * state otherwise. The same state is restored before
		 * returning from this function.
		 */
		if (rec_is_simd_allowed(rec)) {
			rec_simd_save_disable(rec);
			restore_rec_simd_state = true;
		} else {
			simd_save_ns_state();
		}
	} else if (rec_is_simd_allowed(rec)) {
		/*
		 * If the REC is allowed to access SIMD, we entered the RMM
		 * with SIMD traps disabled. Re-enable the traps here, as the
		 * RMM by default runs with SIMD traps enabled.
		 */
		simd_disable();
	}

	switch (function_id) {
	case SMCCC_VERSION:
		rec->regs[0] = SMCCC_VERSION_NUMBER;
		break;
	case SMC_RSI_ABI_VERSION:
		rec->regs[0] = system_rsi_abi_version();
		break;
	case SMC32_PSCI_FID_MIN ... SMC32_PSCI_FID_MAX:
	case SMC64_PSCI_FID_MIN ... SMC64_PSCI_FID_MAX: {
		struct psci_result res;

		res = psci_rsi(rec,
			       function_id,
			       rec->regs[1],
			       rec->regs[2],
			       rec->regs[3]);

		if (!rec->psci_info.pending) {
			rec->regs[0] = res.smc_res.x[0];
			rec->regs[1] = res.smc_res.x[1];
			rec->regs[2] = res.smc_res.x[2];
			rec->regs[3] = res.smc_res.x[3];
		}

		if (res.hvc_forward.forward_psci_call) {
			unsigned int i;

			rec_exit->exit_reason = RMI_EXIT_PSCI;
			rec_exit->gprs[0] = function_id;
			rec_exit->gprs[1] = res.hvc_forward.x1;
			rec_exit->gprs[2] = res.hvc_forward.x2;
			rec_exit->gprs[3] = res.hvc_forward.x3;

			for (i = 4U; i < REC_EXIT_NR_GPRS; i++) {
				rec_exit->gprs[i] = 0UL;
			}

			advance_pc();
			ret_to_rec = false;
		}
		break;
	}
	case SMC_RSI_ATTEST_TOKEN_INIT:
		rec->regs[0] = handle_rsi_attest_token_init(rec);
		break;
	case SMC_RSI_ATTEST_TOKEN_CONTINUE: {
		struct attest_result res;

		while (true) {
			/*
			 * Possible outcomes:
			 * if res.incomplete is true
			 *   if an IRQ is pending
			 *     check for the pending IRQ and return to the host
			 *   else
			 *     try a new iteration
			 * else
			 *   if the RTT walk has failed
			 *     emulate the data abort back to the host
			 *   else
			 *     return to the Realm, because the token creation
			 *     is complete or input parameter validation failed
			 */
			handle_rsi_attest_token_continue(rec, &res);

			if (res.incomplete) {
				if (check_pending_irq()) {
					rec_exit->exit_reason = RMI_EXIT_IRQ;

					/* Copy the result to the REC prior to returning to the host */
					return_result_to_realm(rec, res.smc_res);
					advance_pc();

					/* Return to the NS host to handle the IRQ. */
					ret_to_rec = false;
					break;
				}
			} else {
				if (res.walk_result.abort) {
					emulate_stage2_data_abort(
						rec, rec_exit,
						res.walk_result.rtt_level);
					ret_to_rec = false;	/* Exit to Host */
					break;
				}

				/* Return to Realm */
				return_result_to_realm(rec, res.smc_res);
				break;
			}
		}
		break;
	}
	case SMC_RSI_MEASUREMENT_READ:
		rec->regs[0] = handle_rsi_read_measurement(rec);
		break;
	case SMC_RSI_MEASUREMENT_EXTEND:
		rec->regs[0] = handle_rsi_extend_measurement(rec);
		break;
	case SMC_RSI_REALM_CONFIG: {
		struct rsi_walk_smc_result res;

		res = handle_rsi_realm_config(rec);
		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			ret_to_rec = false;	/* Exit to Host */
		} else {
			/* Return to Realm */
			return_result_to_realm(rec, res.smc_res);
		}
		break;
	}
	case SMC_RSI_IPA_STATE_SET:
		if (handle_rsi_ipa_state_set(rec, rec_exit)) {
			rec->regs[0] = RSI_ERROR_INPUT;
		} else {
			advance_pc();
			ret_to_rec = false;	/* Return to Host */
		}
		break;
	case SMC_RSI_IPA_STATE_GET: {
		struct rsi_walk_smc_result res;

		res = handle_rsi_ipa_state_get(rec);
		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			/* Exit to Host */
			ret_to_rec = false;
		} else {
			/* Exit to Realm */
			return_result_to_realm(rec, res.smc_res);
		}
		break;
	}
	case SMC_RSI_HOST_CALL: {
		struct rsi_host_call_result res;

		res = handle_rsi_host_call(rec, rec_exit);

		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			/* Exit to Host */
			ret_to_rec = false;
		} else {
			rec->regs[0] = res.smc_result;

			/*
			 * In case of an error, return to the Realm; the
			 * parent function calls advance_pc().
			 */
			if (rec->regs[0] == RSI_SUCCESS) {
				advance_pc();

				/* Exit to Host */
				rec->host_call = true;
				rec_exit->exit_reason = RMI_EXIT_HOST_CALL;
				ret_to_rec = false;
			}
		}
		break;
	}
	default:
		rec->regs[0] = SMC_UNKNOWN;
		break;
	}

	if (rsi_handler_needs_fpu(function_id) == true) {
		if (restore_rec_simd_state == true) {
			rec_simd_enable_restore(rec);
		} else {
			simd_restore_ns_state();
		}
	} else if (rec_is_simd_allowed(rec)) {
		simd_enable(rec_simd_type(rec));
	}

	/* Log RSI call */
	RSI_LOG_EXIT(function_id, rec->regs, ret_to_rec);
	return ret_to_rec;
}

/*
 * Return 'true' if the RMM handled the exception,
 * 'false' to return to the Non-secure host.
 */
static bool handle_exception_sync(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	const unsigned long esr = read_esr_el2();

	switch (esr & MASK(ESR_EL2_EC)) {
	case ESR_EL2_EC_WFX:
		rec_exit->esr = esr & (MASK(ESR_EL2_EC) | ESR_EL2_WFx_TI_BIT);
		advance_pc();
		return false;
	case ESR_EL2_EC_HVC:
		realm_inject_undef_abort();
		return true;
	case ESR_EL2_EC_SMC:
		if (!handle_realm_rsi(rec, rec_exit)) {
			return false;
		}
		/*
		 * Advance the PC.
		 * HCR_EL2.TSC traps execution of the SMC instruction.
		 * It is not a routing control for the SMC exception.
		 * Trap exceptions and SMC exceptions have different
		 * preferred return addresses.
		 */
		advance_pc();
		return true;
	case ESR_EL2_EC_SYSREG: {
		bool ret = handle_sysreg_access_trap(rec, rec_exit, esr);

		advance_pc();
		return ret;
	}
	case ESR_EL2_EC_INST_ABORT:
		return handle_instruction_abort(rec, rec_exit, esr);
	case ESR_EL2_EC_DATA_ABORT:
		return handle_data_abort(rec, rec_exit, esr);
	case ESR_EL2_EC_FPU:
		return handle_simd_exception(SIMD_FPU, rec);
	case ESR_EL2_EC_SVE:
		return handle_simd_exception(SIMD_SVE, rec);
	default:
		/*
		 * TODO: Check if there are other exit reasons we could
		 * encounter here and handle them appropriately.
		 */
		break;
	}

	VERBOSE("Unhandled sync exit ESR: %08lx (EC: %lx ISS: %lx)\n",
		esr, EXTRACT(ESR_EL2_EC, esr), EXTRACT(ESR_EL2_ISS, esr));

	/*
	 * Zero values in the esr, far and hpfar fields of the 'rec_exit'
	 * structure are returned to the NS host.
	 * The only information that may leak is that there was some
	 * unhandled/unknown reason for the exception.
	 */
	return false;
}

/*
 * Return 'true' if the RMM handled the exception, 'false' to return to the
 * Non-secure host.
 */
static bool handle_exception_serror_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	const unsigned long esr = read_esr_el2();

	if (esr & ESR_EL2_SERROR_IDS_BIT) {
		/*
		 * Implementation defined content of the esr.
		 */
		system_abort();
	}

	if ((esr & MASK(ESR_EL2_SERROR_DFSC)) != ESR_EL2_SERROR_DFSC_ASYNC) {
		/*
		 * Either an Uncategorized or a Reserved fault status code.
		 */
		system_abort();
	}

	switch (esr & MASK(ESR_EL2_SERROR_AET)) {
	case ESR_EL2_SERROR_AET_UEU:	/* Unrecoverable RAS Error */
	case ESR_EL2_SERROR_AET_UER:	/* Recoverable RAS Error */
		/*
		 * The abort is fatal to the current software. Inject the
		 * SError into the Realm so it can e.g. shut down gracefully
		 * or localize the problem at the specific EL0 application.
		 *
		 * Note: Consider shutting down the Realm here to avoid
		 * the host's attack on unstable Realms.
		 */
		inject_serror(rec, esr);
		/*
		 * Fall through.
		 */
	case ESR_EL2_SERROR_AET_CE:	/* Corrected RAS Error */
	case ESR_EL2_SERROR_AET_UEO:	/* Restartable RAS Error */
		/*
		 * Report the exception to the host.
		 */
		rec_exit->esr = esr & ESR_SERROR_MASK;
		break;
	case ESR_EL2_SERROR_AET_UC:	/* Uncontainable RAS Error */
		system_abort();
		break;
	default:
		/*
		 * Unrecognized Asynchronous Error Type.
		 */
		assert(false);
	}

	return false;
}

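/*
 * Handle an IRQ taken from a lower EL by exiting to the NS host.
 * Always returns 'false'.
 */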
static bool handle_exception_irq_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	(void)rec;

	rec_exit->exit_reason = RMI_EXIT_IRQ;

	/*
	 * With GIC all virtual interrupt programming
	 * must go via the NS hypervisor.
	 */
	return false;
}

/* Returns 'true' when returning to the Realm and 'false' when returning to NS */
bool handle_realm_exit(struct rec *rec, struct rmi_rec_exit *rec_exit, int exception)
{
	switch (exception) {
	case ARM_EXCEPTION_SYNC_LEL: {
		bool ret;

		/*
		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
		 * information.
		 */
		rec_exit->exit_reason = RMI_EXIT_SYNC;
		ret = handle_exception_sync(rec, rec_exit);
		if (!ret) {
			rec->last_run_info.esr = read_esr_el2();
			rec->last_run_info.far = read_far_el2();
			rec->last_run_info.hpfar = read_hpfar_el2();
		}
		return ret;

		/*
		 * TODO: Much more detailed handling of exit reasons.
		 */
	}
	case ARM_EXCEPTION_IRQ_LEL:
		return handle_exception_irq_lel(rec, rec_exit);
	case ARM_EXCEPTION_FIQ_LEL:
		rec_exit->exit_reason = RMI_EXIT_FIQ;
		break;
	case ARM_EXCEPTION_SERROR_LEL: {
		const unsigned long esr = read_esr_el2();
		bool ret;

		/*
		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
		 * information.
		 */
		rec_exit->exit_reason = RMI_EXIT_SERROR;
		ret = handle_exception_serror_lel(rec, rec_exit);
		if (!ret) {
			rec->last_run_info.esr = esr;
			rec->last_run_info.far = read_far_el2();
			rec->last_run_info.hpfar = read_hpfar_el2();
		}
		return ret;
	}
	default:
		INFO("Unrecognized exit reason: %d\n", exception);
		break;
	}

	return false;
}