/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <attestation_token.h>
#include <buffer.h>
#include <esr.h>
#include <exit.h>
#include <gic.h>
#include <granule.h>
#include <inject_exp.h>
#include <memory_alloc.h>
#include <psci.h>
#include <realm.h>
#include <realm_attest.h>
#include <rec.h>
#include <rsi-config.h>
#include <rsi-handler.h>
#include <rsi-host-call.h>
#include <rsi-logger.h>
#include <rsi-memory.h>
#include <rsi-walk.h>
#include <run.h>
#include <simd.h>
#include <smc-rmi.h>
#include <smc-rsi.h>
#include <status.h>
#include <sysreg_traps.h>
#include <table.h>

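/* FPU context save/restore helpers, defined outside this translation unit. */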
void save_fpu_state(struct fpu_state *fpu);
void restore_fpu_state(struct fpu_state *fpu);

static void system_abort(void)
{
	/*
	 * TODO: report the abort to the EL3.
	 * We need to establish the exact EL3 API first.
	 */
	assert(false);
}

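/*
 * Clear the ISV bit in @esr for aborts taken from AArch32 state, as MMIO
 * emulation is supported only for AArch64 accesses.
 * Returns 'true' if @esr was adjusted.
 */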
static bool fixup_aarch32_data_abort(struct rec *rec, unsigned long *esr)
{
	unsigned long spsr = read_spsr_el2();

	if ((spsr & SPSR_EL2_nRW_AARCH32) != 0UL) {
		/*
		 * MMIO emulation of AArch32 reads/writes is not supported.
		 */
		*esr &= ~ESR_EL2_ABORT_ISV_BIT;
		return true;
	}
	return false;
}

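/*
 * Return the value the Realm attempted to write: the REC GPR selected by
 * ESR_EL2.SRT, masked to the access size encoded in @esr. Writes of xzr
 * (register 31) read as zero.
 */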
static unsigned long get_dabt_write_value(struct rec *rec, unsigned long esr)
{
	unsigned int rt = esr_srt(esr);

	/* Handle xzr */
	if (rt == 31U) {
		return 0UL;
	}
	return rec->regs[rt] & access_mask(esr);
}

/*
 * Returns 'true' if access from @rec to @addr is within the Protected IPA space.
 */
static bool access_in_rec_par(struct rec *rec, unsigned long addr)
{
	/*
	 * It is OK to check only the base address of the access because:
	 * - The Protected IPA space starts at address zero.
	 * - The IPA width is below 64 bits, therefore the access cannot
	 *   wrap around.
	 */
	return addr_in_rec_par(rec, addr);
}

/*
 * Returns 'true' if the @ipa is in PAR and its RIPAS is 'empty'.
 *
 * @ipa must be aligned to the granule size.
 */
static bool ipa_is_empty(unsigned long ipa, struct rec *rec)
{
	unsigned long s2tte, *ll_table;
	struct rtt_walk wi;
	enum ripas ripas;
	bool ret;

	assert(GRANULE_ALIGNED(ipa));

	if (!addr_in_rec_par(rec, ipa)) {
		return false;
	}
	granule_lock(rec->realm_info.g_rtt, GRANULE_STATE_RTT);

	rtt_walk_lock_unlock(rec->realm_info.g_rtt,
			     rec->realm_info.s2_starting_level,
			     rec->realm_info.ipa_bits,
			     ipa, RTT_PAGE_LEVEL, &wi);

	ll_table = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&ll_table[wi.index]);

	if (s2tte_is_destroyed(s2tte)) {
		ret = false;
		goto out_unmap_ll_table;
	}
	ripas = s2tte_get_ripas(s2tte);
	ret = (ripas == RIPAS_EMPTY);

out_unmap_ll_table:
	buffer_unmap(ll_table);
	granule_unlock(wi.g_llt);
	return ret;
}

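/*
 * Returns 'true' if @fsc encodes a Synchronous External Abort, either on
 * the access itself or on a stage of the translation table walk.
 */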
static bool fsc_is_external_abort(unsigned long fsc)
{
	if (fsc == ESR_EL2_ABORT_FSC_SEA) {
		return true;
	}

	if ((fsc >= ESR_EL2_ABORT_FSC_SEA_TTW_START) &&
	    (fsc <= ESR_EL2_ABORT_FSC_SEA_TTW_END)) {
		return true;
	}

	return false;
}

/*
 * Handles Data/Instruction Aborts at a lower EL with External Abort fault
 * status code (D/IFSC).
 * Returns 'true' if the exception is an external abort and the 'rec_exit'
 * structure is populated, 'false' otherwise.
 */
static bool handle_sync_external_abort(struct rec *rec,
				       struct rmi_rec_exit *rec_exit,
				       unsigned long esr)
{
	unsigned long fsc = esr & MASK(ESR_EL2_ABORT_FSC);
	unsigned long set = esr & MASK(ESR_EL2_ABORT_SET);

	if (!fsc_is_external_abort(fsc)) {
		return false;
	}

	switch (set) {
	case ESR_EL2_ABORT_SET_UER:
		/*
		 * Recoverable SEA.
		 * Inject the synchronous abort into the Realm.
		 * Report the exception to the host.
		 */
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		/*
		 * Fall through.
		 */
	case ESR_EL2_ABORT_SET_UEO:
		/*
		 * Restartable SEA.
		 * Report the exception to the host.
		 * The REC restarts the same instruction.
		 */
		rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

		/*
		 * The value of the HPFAR_EL2 is not provided to the host as
		 * it is undefined for external aborts.
		 *
		 * We also don't provide the content of FAR_EL2 because it
		 * has no practical value to the host without the HPFAR_EL2.
		 */
		break;
	case ESR_EL2_ABORT_SET_UC:
		/*
		 * Uncontainable SEA.
		 * Fatal to the system.
		 */
		system_abort();
		break;
	default:
		assert(false);
	}

	return true;
}

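/*
 * Populate 'rec_exit' to emulate a stage 2 data abort at @rtt_level for the
 * faulting IPA passed by the Realm in rec->regs[1], so that the NS host can
 * handle it.
 */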
void emulate_stage2_data_abort(struct rec *rec,
			       struct rmi_rec_exit *rec_exit,
			       unsigned long rtt_level)
{
	unsigned long fipa = rec->regs[1];

	assert(rtt_level <= RTT_PAGE_LEVEL);

	/*
	 * Set up the Exception Syndrome Register to emulate a real data
	 * abort and return to the NS host to handle it.
	 */
	rec_exit->esr = (ESR_EL2_EC_DATA_ABORT |
			(ESR_EL2_ABORT_FSC_TRANSLATION_FAULT_L0 + rtt_level));
	rec_exit->far = 0UL;
	rec_exit->hpfar = fipa >> HPFAR_EL2_FIPA_OFFSET;
	rec_exit->exit_reason = RMI_EXIT_SYNC;
}

/*
 * Returns 'true' if the abort is handled and the RMM should return to the Realm,
 * and returns 'false' if the exception should be reported to the NS host.
 */
static bool handle_data_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
			      unsigned long esr)
{
	unsigned long far = 0UL;
	unsigned long hpfar = read_hpfar_el2();
	unsigned long fipa = (hpfar & MASK(HPFAR_EL2_FIPA)) << HPFAR_EL2_FIPA_OFFSET;
	unsigned long write_val = 0UL;

	if (handle_sync_external_abort(rec, rec_exit, esr)) {
		/*
		 * All external aborts are immediately reported to the host.
		 */
		return false;
	}

	/*
	 * A memory access that crosses a page boundary may cause two aborts
	 * with 'hpfar_el2' values referring to two consecutive pages.
	 *
	 * Insert the SEA and return to the Realm if the granule's RIPAS is EMPTY.
	 */
	if (ipa_is_empty(fipa, rec)) {
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		return true;
	}

	if (fixup_aarch32_data_abort(rec, &esr) ||
	    access_in_rec_par(rec, fipa)) {
		esr &= ESR_NONEMULATED_ABORT_MASK;
		goto end;
	}

	if (esr_is_write(esr)) {
		write_val = get_dabt_write_value(rec, esr);
	}

	far = read_far_el2() & ~GRANULE_MASK;
	esr &= ESR_EMULATED_ABORT_MASK;

end:
	rec_exit->esr = esr;
	rec_exit->far = far;
	rec_exit->hpfar = hpfar;
	rec_exit->gprs[0] = write_val;

	return false;
}

/*
 * Returns 'true' if the abort is handled and the RMM should return to the Realm,
 * and returns 'false' if the exception should be reported to the NS host.
 */
static bool handle_instruction_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
				     unsigned long esr)
{
	unsigned long fsc = esr & MASK(ESR_EL2_ABORT_FSC);
	unsigned long fsc_type = fsc & ~MASK(ESR_EL2_ABORT_FSC_LEVEL);
	unsigned long hpfar = read_hpfar_el2();
	unsigned long fipa = (hpfar & MASK(HPFAR_EL2_FIPA)) << HPFAR_EL2_FIPA_OFFSET;

	if (handle_sync_external_abort(rec, rec_exit, esr)) {
		/*
		 * All external aborts are immediately reported to the host.
		 */
		return false;
	}

	/*
	 * Insert the SEA and return to the Realm if:
	 * - The instruction abort is at an Unprotected IPA, or
	 * - The granule's RIPAS is EMPTY
	 */
	if (!access_in_rec_par(rec, fipa) || ipa_is_empty(fipa, rec)) {
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		return true;
	}

	if (fsc_type != ESR_EL2_ABORT_FSC_TRANSLATION_FAULT) {
		unsigned long far = read_far_el2();

		/*
		 * TODO: Should this ever happen, or is it an indication of an
		 * internal consistency failure in the RMM which should lead
		 * to a panic instead?
		 */

		ERROR("Unhandled instruction abort:\n");
		ERROR("    FSC:   %12s0x%02lx\n", " ", fsc);
		ERROR("    FAR:   %16lx\n", far);
		ERROR("  HPFAR:   %16lx\n", hpfar);
		return false;
	}

	rec_exit->hpfar = hpfar;
	rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

	return false;
}

/*
 * Handle FPU or SVE exceptions.
 * Returns 'true' if the exception is handled.
 */
static bool handle_simd_exception(simd_t exp_type, struct rec *rec)
{
	/*
	 * If the REC wants to use SVE and SVE is not enabled for this REC,
	 * then inject an undefined abort. This can happen when the CPU
	 * implements FEAT_SVE but the Realm didn't request this feature
	 * during creation.
	 */
	if (exp_type == SIMD_SVE && rec_simd_type(rec) != SIMD_SVE) {
		realm_inject_undef_abort();
		return true;
	}

	/* FPU or SVE exceptions can happen only when the REC hasn't used SIMD */
	assert(rec_is_simd_allowed(rec) == false);

	/*
	 * Allow the REC to use SIMD. Save the NS SIMD state and restore the
	 * REC SIMD state from memory to registers.
	 */
	simd_save_ns_state();
	rec_simd_enable_restore(rec);

	/*
	 * Return 'true' indicating that this exception has been handled and
	 * execution can continue.
	 */
	return true;
}

/*
 * Returns 'false' if no IRQ is pending,
 * 'true' if an IRQ is pending and we need to return to the host.
 */
static bool check_pending_irq(void)
{
	unsigned long pending_irq;

	pending_irq = read_isr_el1();

	return (pending_irq != 0UL);
}

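/*
 * Advance ELR_EL2 past the 4-byte instruction so that the REC resumes
 * at the instruction following the one that caused the exit.
 */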
static void advance_pc(void)
{
	unsigned long pc = read_elr_el2();

	write_elr_el2(pc + 4UL);
}

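/* Copy an SMC result (x0-x3) back into the REC's saved GPRs. */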
static void return_result_to_realm(struct rec *rec, struct smc_result result)
{
	rec->regs[0] = result.x[0];
	rec->regs[1] = result.x[1];
	rec->regs[2] = result.x[2];
	rec->regs[3] = result.x[3];
}

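/*
 * When the RMM is built with RMM_FPU_USE_AT_REL2, the attestation token and
 * measurement extend handlers use the FPU at REL2, so the caller must save
 * and restore any live SIMD state around them.
 */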
static inline bool rsi_handler_needs_fpu(unsigned int id)
{
#ifdef RMM_FPU_USE_AT_REL2
	if (id == SMC_RSI_ATTEST_TOKEN_CONTINUE ||
	    id == SMC_RSI_MEASUREMENT_EXTEND) {
		return true;
	}
#endif
	return false;
}

/*
 * Return 'true' if execution should continue in the REC, otherwise return
 * 'false' to go back to the NS caller of REC.Enter.
 */
static bool handle_realm_rsi(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	bool ret_to_rec = true;	/* Return to Realm */
	unsigned int function_id = (unsigned int)rec->regs[0];
	bool restore_rec_simd_state = false;

	RSI_LOG_SET(rec->regs);

	/* Ignore the SVE hint bit until the RMM supports it */
	function_id &= ~MASK(SMC_SVE_HINT);

	/* cppcheck-suppress unsignedPositive */
	if (!IS_SMC32_PSCI_FID(function_id) && !IS_SMC64_PSCI_FID(function_id)
	    && !IS_SMC64_RSI_FID(function_id)
	    && !(function_id == SMCCC_VERSION)) {

		ERROR("Invalid RSI function_id = %x\n", function_id);
		rec->regs[0] = SMC_UNKNOWN;
		return true;
	}

	if (rsi_handler_needs_fpu(function_id) == true) {
		/*
		 * The RSI handler uses the FPU at REL2, so actively save the
		 * REC SIMD state (if the REC is using SIMD) or the NS SIMD
		 * state. The same state is restored before returning from
		 * this function.
		 */
		if (rec_is_simd_allowed(rec)) {
			rec_simd_save_disable(rec);
			restore_rec_simd_state = true;
		} else {
			simd_save_ns_state();
		}
	} else if (rec_is_simd_allowed(rec)) {
		/*
		 * If the REC is allowed to access SIMD, then we entered the
		 * RMM with SIMD traps disabled. Enable them again, as the
		 * RMM by default runs with SIMD traps enabled.
		 */
		simd_disable();
	}

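	/*
	 * Dispatch on the function ID. PSCI calls may be forwarded to the
	 * NS host; some RSI calls also exit to the host, e.g. on an RTT
	 * walk failure or for RSI_HOST_CALL.
	 */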
	switch (function_id) {
	case SMCCC_VERSION:
		rec->regs[0] = SMCCC_VERSION_NUMBER;
		break;
	case SMC_RSI_ABI_VERSION:
		rec->regs[0] = system_rsi_abi_version();
		break;
	case SMC32_PSCI_FID_MIN ... SMC32_PSCI_FID_MAX:
	case SMC64_PSCI_FID_MIN ... SMC64_PSCI_FID_MAX: {
		struct psci_result res;

		res = psci_rsi(rec,
			       function_id,
			       rec->regs[1],
			       rec->regs[2],
			       rec->regs[3]);

		if (!rec->psci_info.pending) {
			rec->regs[0] = res.smc_res.x[0];
			rec->regs[1] = res.smc_res.x[1];
			rec->regs[2] = res.smc_res.x[2];
			rec->regs[3] = res.smc_res.x[3];
		}

		if (res.hvc_forward.forward_psci_call) {
			unsigned int i;

			rec_exit->exit_reason = RMI_EXIT_PSCI;
			rec_exit->gprs[0] = function_id;
			rec_exit->gprs[1] = res.hvc_forward.x1;
			rec_exit->gprs[2] = res.hvc_forward.x2;
			rec_exit->gprs[3] = res.hvc_forward.x3;

			for (i = 4U; i < REC_EXIT_NR_GPRS; i++) {
				rec_exit->gprs[i] = 0UL;
			}

			advance_pc();
			ret_to_rec = false;
		}
		break;
	}
	case SMC_RSI_ATTEST_TOKEN_INIT:
		rec->regs[0] = handle_rsi_attest_token_init(rec);
		break;
	case SMC_RSI_ATTEST_TOKEN_CONTINUE: {
		struct attest_result res;

		while (true) {
			/*
			 * Possible outcomes:
			 * if res.incomplete is true
			 *	if IRQ pending
			 *		check for pending IRQ and return to host
			 *	else try a new iteration
			 * else
			 *	if RTT table walk has failed,
			 *		emulate data abort back to host
			 *	otherwise
			 *		return to realm because the token
			 *		creation is complete or input parameter
			 *		validation failed.
			 */
			handle_rsi_attest_token_continue(rec, &res);

			if (res.incomplete) {
				if (check_pending_irq()) {
					rec_exit->exit_reason = RMI_EXIT_IRQ;
					/* Return to NS host to handle IRQ. */
					ret_to_rec = false;
					break;
				}
			} else {
				if (res.walk_result.abort) {
					emulate_stage2_data_abort(
						rec, rec_exit,
						res.walk_result.rtt_level);
					ret_to_rec = false; /* Exit to Host */
					break;
				}

				/* Return to Realm */
				return_result_to_realm(rec, res.smc_res);
				break;
			}
		}
		break;
	}
	case SMC_RSI_MEASUREMENT_READ:
		rec->regs[0] = handle_rsi_read_measurement(rec);
		break;
	case SMC_RSI_MEASUREMENT_EXTEND:
		rec->regs[0] = handle_rsi_extend_measurement(rec);
		break;
	case SMC_RSI_REALM_CONFIG: {
		struct rsi_walk_smc_result res;

		res = handle_rsi_realm_config(rec);
		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			ret_to_rec = false; /* Exit to Host */
		} else {
			/* Return to Realm */
			return_result_to_realm(rec, res.smc_res);
		}
		break;
	}
	case SMC_RSI_IPA_STATE_SET:
		if (handle_rsi_ipa_state_set(rec, rec_exit)) {
			rec->regs[0] = RSI_ERROR_INPUT;
		} else {
			advance_pc();
			ret_to_rec = false; /* Return to Host */
		}
		break;
	case SMC_RSI_IPA_STATE_GET: {
		struct rsi_walk_smc_result res;

		res = handle_rsi_ipa_state_get(rec);
		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			/* Exit to Host */
			ret_to_rec = false;
		} else {
			/* Exit to Realm */
			return_result_to_realm(rec, res.smc_res);
		}
		break;
	}
	case SMC_RSI_HOST_CALL: {
		struct rsi_host_call_result res;

		res = handle_rsi_host_call(rec, rec_exit);

		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			/* Exit to Host */
			ret_to_rec = false;
		} else {
			rec->regs[0] = res.smc_result;

			/*
			 * Return to Realm in case of error,
			 * parent function calls advance_pc()
			 */
			if (rec->regs[0] == RSI_SUCCESS) {
				advance_pc();

				/* Exit to Host */
				rec->host_call = true;
				rec_exit->exit_reason = RMI_EXIT_HOST_CALL;
				ret_to_rec = false;
			}
		}
		break;
	}
	default:
		rec->regs[0] = SMC_UNKNOWN;
		break;
	}

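	/*
	 * Restore the SIMD state that was saved before the RSI call was
	 * dispatched, or re-enable SIMD access for a REC that is allowed
	 * to use it.
	 */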
	if (rsi_handler_needs_fpu(function_id) == true) {
		if (restore_rec_simd_state == true) {
			rec_simd_enable_restore(rec);
		} else {
			simd_restore_ns_state();
		}
	} else if (rec_is_simd_allowed(rec)) {
		simd_enable(rec_simd_type(rec));
	}

	/* Log RSI call */
	RSI_LOG_EXIT(function_id, rec->regs, ret_to_rec);
	return ret_to_rec;
}

/*
 * Return 'true' if the RMM handled the exception,
 * 'false' to return to the Non-secure host.
 */
static bool handle_exception_sync(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	const unsigned long esr = read_esr_el2();

	switch (esr & MASK(ESR_EL2_EC)) {
	case ESR_EL2_EC_WFX:
		rec_exit->esr = esr & (MASK(ESR_EL2_EC) | ESR_EL2_WFx_TI_BIT);
		advance_pc();
		return false;
	case ESR_EL2_EC_HVC:
		realm_inject_undef_abort();
		return true;
	case ESR_EL2_EC_SMC:
		if (!handle_realm_rsi(rec, rec_exit)) {
			return false;
		}
		/*
		 * Advance PC.
		 * HCR_EL2.TSC traps execution of the SMC instruction.
		 * It is not a routing control for the SMC exception.
		 * Trap exceptions and SMC exceptions have different
		 * preferred return addresses.
		 */
		advance_pc();
		return true;
	case ESR_EL2_EC_SYSREG: {
		bool ret = handle_sysreg_access_trap(rec, rec_exit, esr);

		advance_pc();
		return ret;
	}
	case ESR_EL2_EC_INST_ABORT:
		return handle_instruction_abort(rec, rec_exit, esr);
	case ESR_EL2_EC_DATA_ABORT:
		return handle_data_abort(rec, rec_exit, esr);
	case ESR_EL2_EC_FPU:
		return handle_simd_exception(SIMD_FPU, rec);
	case ESR_EL2_EC_SVE:
		return handle_simd_exception(SIMD_SVE, rec);
	default:
		/*
		 * TODO: Check if there are other exit reasons we could
		 * encounter here and handle them appropriately
		 */
		break;
	}

	VERBOSE("Unhandled sync exit ESR: %08lx (EC: %lx ISS: %lx)\n",
		esr, EXTRACT(ESR_EL2_EC, esr), EXTRACT(ESR_EL2_ISS, esr));

	/*
	 * Zero values in the esr, far and hpfar fields of the 'rec_exit'
	 * structure are returned to the NS host.
	 * The only information that may leak is that there was
	 * some unhandled/unknown reason for the exception.
	 */
	return false;
}

/*
 * Return 'true' if the RMM handled the exception, 'false' to return to the
 * Non-secure host.
 */
static bool handle_exception_serror_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	const unsigned long esr = read_esr_el2();

	if (esr & ESR_EL2_SERROR_IDS_BIT) {
		/*
		 * Implementation defined content of the esr.
		 */
		system_abort();
	}

	if ((esr & MASK(ESR_EL2_SERROR_DFSC)) != ESR_EL2_SERROR_DFSC_ASYNC) {
		/*
		 * Either Uncategorized or Reserved fault status code.
		 */
		system_abort();
	}

	switch (esr & MASK(ESR_EL2_SERROR_AET)) {
	case ESR_EL2_SERROR_AET_UEU:	/* Unrecoverable RAS Error */
	case ESR_EL2_SERROR_AET_UER:	/* Recoverable RAS Error */
		/*
		 * The abort is fatal to the current S/W. Inject the SError
		 * into the Realm so it can e.g. shut down gracefully or
		 * localize the problem at the specific EL0 application.
		 *
		 * Note: Consider shutting down the Realm here to avoid
		 * attacks from the host on unstable Realms.
		 */
		inject_serror(rec, esr);
		/*
		 * Fall through.
		 */
	case ESR_EL2_SERROR_AET_CE:	/* Corrected RAS Error */
	case ESR_EL2_SERROR_AET_UEO:	/* Restartable RAS Error */
		/*
		 * Report the exception to the host.
		 */
		rec_exit->esr = esr & ESR_SERROR_MASK;
		break;
	case ESR_EL2_SERROR_AET_UC:	/* Uncontainable RAS Error */
		system_abort();
		break;
	default:
		/*
		 * Unrecognized Asynchronous Error Type
		 */
		assert(false);
	}

	return false;
}

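/*
 * IRQs at a lower EL are not handled by the RMM; report the exit to the
 * NS host, which owns virtual interrupt delivery for the Realm.
 */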
static bool handle_exception_irq_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	(void)rec;

	rec_exit->exit_reason = RMI_EXIT_IRQ;

	/*
	 * With the GIC all virtual interrupt programming
	 * must go via the NS hypervisor.
	 */
	return false;
}

/* Returns 'true' when returning to the Realm (S) and 'false' when returning to NS */
bool handle_realm_exit(struct rec *rec, struct rmi_rec_exit *rec_exit, int exception)
{
	switch (exception) {
	case ARM_EXCEPTION_SYNC_LEL: {
		bool ret;

		/*
		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
		 * information.
		 */
		rec_exit->exit_reason = RMI_EXIT_SYNC;
		ret = handle_exception_sync(rec, rec_exit);
		if (!ret) {
			rec->last_run_info.esr = read_esr_el2();
			rec->last_run_info.far = read_far_el2();
			rec->last_run_info.hpfar = read_hpfar_el2();
		}
		return ret;

		/*
		 * TODO: Much more detailed handling of exit reasons.
		 */
	}
	case ARM_EXCEPTION_IRQ_LEL:
		return handle_exception_irq_lel(rec, rec_exit);
	case ARM_EXCEPTION_FIQ_LEL:
		rec_exit->exit_reason = RMI_EXIT_FIQ;
		break;
	case ARM_EXCEPTION_SERROR_LEL: {
		const unsigned long esr = read_esr_el2();
		bool ret;

		/*
		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
		 * information.
		 */
		rec_exit->exit_reason = RMI_EXIT_SERROR;
		ret = handle_exception_serror_lel(rec, rec_exit);
		if (!ret) {
			rec->last_run_info.esr = esr;
			rec->last_run_info.far = read_far_el2();
			rec->last_run_info.hpfar = read_hpfar_el2();
		}
		return ret;
	}
	default:
		INFO("Unrecognized exit reason: %d\n", exception);
		break;
	}

	return false;
}