/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <buffer.h>
#include <esr.h>
#include <exit.h>
#include <gic.h>
#include <granule.h>
#include <inject_exp.h>
#include <memory_alloc.h>
#include <psci.h>
#include <realm.h>
#include <realm_attest.h>
#include <rec.h>
#include <rsi-config.h>
#include <rsi-handler.h>
#include <rsi-host-call.h>
#include <rsi-logger.h>
#include <rsi-memory.h>
#include <rsi-walk.h>
#include <run.h>
#include <simd.h>
#include <smc-rmi.h>
#include <smc-rsi.h>
#include <status.h>
#include <sysreg_traps.h>
#include <table.h>

void save_fpu_state(struct fpu_state *fpu);
void restore_fpu_state(struct fpu_state *fpu);

static void system_abort(void)
{
        /*
         * TODO: Report the abort to EL3.
         * The exact EL3 API needs to be established first.
         */
        assert(false);
}

static bool fixup_aarch32_data_abort(struct rec *rec, unsigned long *esr)
{
        unsigned long spsr = read_spsr_el2();

        if ((spsr & SPSR_EL2_nRW_AARCH32) != 0UL) {
                /*
                 * MMIO emulation of AArch32 reads/writes is not supported.
                 */
                *esr &= ~ESR_EL2_ABORT_ISV_BIT;
                return true;
        }
        return false;
}

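/*
 * Returns the value a Realm write access was about to store, for a data
 * abort with a valid syndrome (ISV set): ESR_EL2.SRT names the source
 * register, with 31 encoding XZR/WZR, and access_mask() is assumed to
 * derive a byte-width mask from the access size in the syndrome.
 */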
static unsigned long get_dabt_write_value(struct rec *rec, unsigned long esr)
{
        unsigned int rt = esr_srt(esr);

        /* Handle xzr */
        if (rt == 31U) {
                return 0UL;
        }
        return rec->regs[rt] & access_mask(esr);
}

/*
 * Returns 'true' if access from @rec to @addr is within the Protected IPA space.
 */
static bool access_in_rec_par(struct rec *rec, unsigned long addr)
{
        /*
         * It is OK to check only the base address of the access because:
         * - The Protected IPA space starts at address zero.
         * - The IPA width is below 64 bits, therefore the access cannot
         *   wrap around.
         */
        return addr_in_rec_par(rec, addr);
}

/*
 * Returns 'true' if the @ipa is in PAR and its RIPAS is 'empty'.
 *
 * @ipa must be aligned to the granule size.
 */
static bool ipa_is_empty(unsigned long ipa, struct rec *rec)
{
        unsigned long s2tte, *ll_table;
        struct rtt_walk wi;
        enum ripas ripas_val;
        bool ret;

        assert(GRANULE_ALIGNED(ipa));

        if (!addr_in_rec_par(rec, ipa)) {
                return false;
        }
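        /*
         * Lock the RTT root granule and walk down to the page level; the
         * walk is expected to hand the lock over to the last-level table
         * (wi.g_llt), which is released once the S2TTE has been read.
         */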
        granule_lock(rec->realm_info.g_rtt, GRANULE_STATE_RTT);

        rtt_walk_lock_unlock(rec->realm_info.g_rtt,
                             rec->realm_info.s2_starting_level,
                             rec->realm_info.ipa_bits,
                             ipa, RTT_PAGE_LEVEL, &wi);

        ll_table = granule_map(wi.g_llt, SLOT_RTT);
        s2tte = s2tte_read(&ll_table[wi.index]);

        if (s2tte_is_destroyed(s2tte)) {
                ret = false;
                goto out_unmap_ll_table;
        }
        ripas_val = s2tte_get_ripas(s2tte);
        ret = (ripas_val == RIPAS_EMPTY);

out_unmap_ll_table:
        buffer_unmap(ll_table);
        granule_unlock(wi.g_llt);
        return ret;
}

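/*
 * Synchronous External Abort FSC encodings: one code for an SEA on the
 * access itself, plus a contiguous range (the *_TTW_* constants) for SEAs
 * taken on a stage of the translation table walk.
 */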
static bool fsc_is_external_abort(unsigned long fsc)
{
        if (fsc == ESR_EL2_ABORT_FSC_SEA) {
                return true;
        }

        if ((fsc >= ESR_EL2_ABORT_FSC_SEA_TTW_START) &&
            (fsc <= ESR_EL2_ABORT_FSC_SEA_TTW_END)) {
                return true;
        }

        return false;
}

/*
 * Handles Data/Instruction Aborts from a lower EL with an External Abort
 * fault status code (D/IFSC).
 * Returns 'true' if the exception is an external abort and the `rec_exit`
 * structure has been populated, 'false' otherwise.
 */
static bool handle_sync_external_abort(struct rec *rec,
                                       struct rmi_rec_exit *rec_exit,
                                       unsigned long esr)
{
        unsigned long fsc = esr & MASK(ESR_EL2_ABORT_FSC);
        unsigned long set = esr & MASK(ESR_EL2_ABORT_SET);

        if (!fsc_is_external_abort(fsc)) {
                return false;
        }

        switch (set) {
        case ESR_EL2_ABORT_SET_UER:
                /*
                 * Recoverable SEA.
                 * Inject the synchronous abort into the Realm.
                 * Report the exception to the host.
                 */
                inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
                /*
                 * Fall through.
                 */
        case ESR_EL2_ABORT_SET_UEO:
                /*
                 * Restartable SEA.
                 * Report the exception to the host.
                 * The REC restarts the same instruction.
                 */
                rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

                /*
                 * The value of HPFAR_EL2 is not provided to the host as
                 * it is undefined for external aborts.
                 *
                 * The content of FAR_EL2 is not provided either, because
                 * it has no practical value to the host without HPFAR_EL2.
                 */
                break;
        case ESR_EL2_ABORT_SET_UC:
                /*
                 * Uncontainable SEA.
                 * Fatal to the system.
                 */
                system_abort();
                break;
        default:
                assert(false);
        }

        return true;
}

void emulate_stage2_data_abort(struct rec *rec,
                               struct rmi_rec_exit *rec_exit,
                               unsigned long rtt_level)
{
        unsigned long fipa = rec->regs[1];

        assert(rtt_level <= RTT_PAGE_LEVEL);

        /*
         * Set up the Exception Syndrome Register to emulate a real data
         * abort and return to the NS host to handle it.
         */
        rec_exit->esr = (ESR_EL2_EC_DATA_ABORT |
                         (ESR_EL2_ABORT_FSC_TRANSLATION_FAULT_L0 + rtt_level));
        rec_exit->far = 0UL;
        rec_exit->hpfar = fipa >> HPFAR_EL2_FIPA_OFFSET;
        rec_exit->exit_reason = RMI_EXIT_SYNC;
}

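/*
 * For illustration: an abort reported at RTT level 3 appears to the host
 * as a stage-2 translation fault at level 3, with the faulting IPA (taken
 * from the RSI call's x1) encoded in rec_exit->hpfar the same way the
 * hardware reports it in HPFAR_EL2.
 */
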
/*
 * Returns 'true' if the abort is handled and the RMM should return to the Realm,
 * and returns 'false' if the exception should be reported to the NS host.
 */
static bool handle_data_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
                              unsigned long esr)
{
        unsigned long far = 0UL;
        unsigned long hpfar = read_hpfar_el2();
        unsigned long fipa = (hpfar & MASK(HPFAR_EL2_FIPA)) << HPFAR_EL2_FIPA_OFFSET;
        unsigned long write_val = 0UL;

        if (handle_sync_external_abort(rec, rec_exit, esr)) {
                /*
                 * All external aborts are immediately reported to the host.
                 */
                return false;
        }

        /*
         * A memory access that crosses a page boundary may cause two aborts
         * with `hpfar_el2` values referring to two consecutive pages.
         *
         * Insert the SEA and return to the Realm if the granule's RIPAS is EMPTY.
         */
        if (ipa_is_empty(fipa, rec)) {
                inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
                return true;
        }

        if (fixup_aarch32_data_abort(rec, &esr) ||
            access_in_rec_par(rec, fipa)) {
                esr &= ESR_NONEMULATED_ABORT_MASK;
                goto end;
        }

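        /*
         * The abort is at an emulatable Unprotected IPA: for a write, pass
         * the value being written to the host in gprs[0] so that it can
         * emulate the device access.
         */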
        if (esr_is_write(esr)) {
                write_val = get_dabt_write_value(rec, esr);
        }

        far = read_far_el2() & ~GRANULE_MASK;
        esr &= ESR_EMULATED_ABORT_MASK;

end:
        rec_exit->esr = esr;
        rec_exit->far = far;
        rec_exit->hpfar = hpfar;
        rec_exit->gprs[0] = write_val;

        return false;
}

/*
 * Returns 'true' if the abort is handled and the RMM should return to the Realm,
 * and returns 'false' if the exception should be reported to the NS host.
 */
static bool handle_instruction_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
                                     unsigned long esr)
{
        unsigned long fsc = esr & MASK(ESR_EL2_ABORT_FSC);
        unsigned long fsc_type = fsc & ~MASK(ESR_EL2_ABORT_FSC_LEVEL);
        unsigned long hpfar = read_hpfar_el2();
        unsigned long fipa = (hpfar & MASK(HPFAR_EL2_FIPA)) << HPFAR_EL2_FIPA_OFFSET;

        if (handle_sync_external_abort(rec, rec_exit, esr)) {
                /*
                 * All external aborts are immediately reported to the host.
                 */
                return false;
        }

        /*
         * Insert the SEA and return to the Realm if:
         * - The instruction abort is at an Unprotected IPA, or
         * - The granule's RIPAS is EMPTY.
         */
        if (!access_in_rec_par(rec, fipa) || ipa_is_empty(fipa, rec)) {
                inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
                return true;
        }

        if (fsc_type != ESR_EL2_ABORT_FSC_TRANSLATION_FAULT) {
                unsigned long far = read_far_el2();

                /*
                 * TODO: Should this ever happen, or is it an indication of an
                 * internal consistency failure in the RMM which should lead
                 * to a panic instead?
                 */

                ERROR("Unhandled instruction abort:\n");
                ERROR("    FSC: %12s0x%02lx\n", " ", fsc);
                ERROR("    FAR: %16lx\n", far);
                ERROR("  HPFAR: %16lx\n", hpfar);
                return false;
        }

        rec_exit->hpfar = hpfar;
        rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

        return false;
}

/*
 * Handle FPU or SVE exceptions.
 * Returns: true if the exception is handled.
 */
static bool
handle_simd_exception(simd_t exp_type, struct rec *rec)
{
        /*
         * If the REC wants to use SVE but SVE is not enabled for this REC,
         * inject an undefined abort. This can happen when the CPU implements
         * FEAT_SVE but the Realm didn't request this feature during creation.
         */
        if (exp_type == SIMD_SVE && rec_simd_type(rec) != SIMD_SVE) {
                realm_inject_undef_abort();
                return true;
        }

        /* An FPU or SVE exception can happen only when the REC hasn't used SIMD */
        assert(rec_is_simd_allowed(rec) == false);

        /*
         * Allow the REC to use SIMD. Save the NS SIMD state and restore the
         * REC SIMD state from memory to registers.
         */
        simd_save_ns_state();
        rec_simd_enable_restore(rec);

        /*
         * Return 'true' indicating that this exception has been handled and
         * execution can continue.
         */
        return true;
}

/*
 * Return 'false' if no IRQ is pending,
 * return 'true' if there is an IRQ pending and we need to return to the host.
 */
static bool check_pending_irq(void)
{
        unsigned long pending_irq;

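        /*
         * ISR_EL1 reads as non-zero when a physical IRQ, FIQ or SError is
         * pending; such an interrupt has to be handled by the NS host.
         */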
        pending_irq = read_isr_el1();

        return (pending_irq != 0UL);
}

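/*
 * Move the Realm's return address past the trapped instruction: for a trap
 * taken to EL2, ELR_EL2 holds the address of the trapped instruction itself,
 * and every AArch64 instruction is 4 bytes long.
 */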
static void advance_pc(void)
{
        unsigned long pc = read_elr_el2();

        write_elr_el2(pc + 4UL);
}

static void return_result_to_realm(struct rec *rec, struct smc_result result)
{
        rec->regs[0] = result.x[0];
        rec->regs[1] = result.x[1];
        rec->regs[2] = result.x[2];
        rec->regs[3] = result.x[3];
}

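/*
 * When RMM_FPU_USE_AT_REL2 is defined, the attestation and measurement
 * handlers below are presumed to run crypto routines that use the FP/SIMD
 * registers at R-EL2, so the caller must save the live SIMD context
 * around them.
 */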
static inline bool rsi_handler_needs_fpu(unsigned int id)
{
#ifdef RMM_FPU_USE_AT_REL2
        if (id == SMC_RSI_ATTEST_TOKEN_CONTINUE ||
            id == SMC_RSI_MEASUREMENT_EXTEND) {
                return true;
        }
#endif
        return false;
}

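/*
 * RSI calls follow the SMC calling convention: the function ID arrives in
 * x0 (rec->regs[0]) with arguments in x1..x3, and up to four result
 * registers are written back via return_result_to_realm().
 */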
/*
 * Return 'true' if execution should continue in the REC, otherwise return
 * 'false' to go back to the NS caller of REC.Enter.
 */
static bool handle_realm_rsi(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        bool ret_to_rec = true; /* Return to Realm */
        unsigned int function_id = (unsigned int)rec->regs[0];
        bool restore_rec_simd_state = false;

        RSI_LOG_SET(rec->regs);

        /* Ignore the SVE hint bit until the RMM supports it */
        function_id &= ~MASK(SMC_SVE_HINT);

        if (rsi_handler_needs_fpu(function_id) == true) {
                /*
                 * The RSI handler uses the FPU at R-EL2, so eagerly save the
                 * REC SIMD state if the REC is using SIMD, or the NS SIMD
                 * state otherwise. The saved state is restored before
                 * returning from this function.
                 */
                if (rec_is_simd_allowed(rec)) {
                        rec_simd_save_disable(rec);
                        restore_rec_simd_state = true;
                } else {
                        simd_save_ns_state();
                }
        } else if (rec_is_simd_allowed(rec)) {
                /*
                 * If the REC is allowed to access SIMD, then we entered the
                 * RMM with SIMD traps disabled. Re-enable SIMD traps, as the
                 * RMM by default runs with SIMD traps enabled.
                 */
                simd_disable();
        }

        switch (function_id) {
        case SMCCC_VERSION:
                rec->regs[0] = SMCCC_VERSION_NUMBER;
                break;
        case SMC_RSI_ABI_VERSION:
                rec->regs[0] = system_rsi_abi_version();
                break;
        case SMC32_PSCI_FID_MIN ... SMC32_PSCI_FID_MAX:
        case SMC64_PSCI_FID_MIN ... SMC64_PSCI_FID_MAX: {
                struct psci_result res;

                res = psci_rsi(rec,
                               function_id,
                               rec->regs[1],
                               rec->regs[2],
                               rec->regs[3]);

                if (!rec->psci_info.pending) {
                        rec->regs[0] = res.smc_res.x[0];
                        rec->regs[1] = res.smc_res.x[1];
                        rec->regs[2] = res.smc_res.x[2];
                        rec->regs[3] = res.smc_res.x[3];
                }

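                /*
                 * Some PSCI calls must be completed by the NS hypervisor;
                 * these are forwarded through an RMI_EXIT_PSCI REC exit with
                 * the call's arguments placed in the exit GPRs.
                 */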
                if (res.hvc_forward.forward_psci_call) {
                        unsigned int i;

                        rec_exit->exit_reason = RMI_EXIT_PSCI;
                        rec_exit->gprs[0] = function_id;
                        rec_exit->gprs[1] = res.hvc_forward.x1;
                        rec_exit->gprs[2] = res.hvc_forward.x2;
                        rec_exit->gprs[3] = res.hvc_forward.x3;

                        for (i = 4U; i < REC_EXIT_NR_GPRS; i++) {
                                rec_exit->gprs[i] = 0UL;
                        }

                        advance_pc();
                        ret_to_rec = false;
                }
                break;
        }
        case SMC_RSI_ATTEST_TOKEN_INIT:
                rec->regs[0] = handle_rsi_attest_token_init(rec);
                break;
        case SMC_RSI_ATTEST_TOKEN_CONTINUE: {
                struct attest_result res;

                while (true) {
                        /*
                         * Possible outcomes:
                         * if res.incomplete is true
                         *   if an IRQ is pending
                         *     check for the pending IRQ and return to the host
                         *   else try a new iteration
                         * else
                         *   if the RTT table walk has failed,
                         *     emulate the data abort back to the host
                         *   otherwise
                         *     return to the Realm, because the token creation
                         *     is complete or input parameter validation failed.
                         */
                        handle_rsi_attest_token_continue(rec, &res);

                        if (res.incomplete) {
                                if (check_pending_irq()) {
                                        rec_exit->exit_reason = RMI_EXIT_IRQ;

                                        /* Copy the result to the REC prior to returning to the host */
                                        return_result_to_realm(rec, res.smc_res);
                                        advance_pc();

                                        /* Return to the NS host to handle the IRQ. */
                                        ret_to_rec = false;
                                        break;
                                }
                        } else {
                                if (res.walk_result.abort) {
                                        emulate_stage2_data_abort(
                                                rec, rec_exit,
                                                res.walk_result.rtt_level);
                                        ret_to_rec = false; /* Exit to Host */
                                        break;
                                }

                                /* Return to Realm */
                                return_result_to_realm(rec, res.smc_res);
                                break;
                        }
                }

                break;
        }
521 rec->regs[0] = handle_rsi_read_measurement(rec);
522 break;
523 case SMC_RSI_MEASUREMENT_EXTEND:
524 rec->regs[0] = handle_rsi_extend_measurement(rec);
525 break;
526 case SMC_RSI_REALM_CONFIG: {
Arunachalam Ganapathydbaa8862022-11-03 13:56:18 +0000527 struct rsi_walk_smc_result res;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000528
529 res = handle_rsi_realm_config(rec);
530 if (res.walk_result.abort) {
531 emulate_stage2_data_abort(rec, rec_exit,
532 res.walk_result.rtt_level);
533 ret_to_rec = false; /* Exit to Host */
534 } else {
535 /* Return to Realm */
536 return_result_to_realm(rec, res.smc_res);
537 }
538 break;
539 }
        case SMC_RSI_IPA_STATE_SET:
                if (handle_rsi_ipa_state_set(rec, rec_exit)) {
                        rec->regs[0] = RSI_ERROR_INPUT;
                } else {
                        advance_pc();
                        ret_to_rec = false; /* Return to Host */
                }
                break;
        case SMC_RSI_IPA_STATE_GET: {
                struct rsi_walk_smc_result res;

                res = handle_rsi_ipa_state_get(rec);
                if (res.walk_result.abort) {
                        emulate_stage2_data_abort(rec, rec_exit,
                                                  res.walk_result.rtt_level);
                        /* Exit to Host */
                        ret_to_rec = false;
                } else {
                        /* Return to Realm */
                        return_result_to_realm(rec, res.smc_res);
                }
                break;
        }
        case SMC_RSI_HOST_CALL: {
                struct rsi_host_call_result res;

                res = handle_rsi_host_call(rec, rec_exit);

                if (res.walk_result.abort) {
                        emulate_stage2_data_abort(rec, rec_exit,
                                                  res.walk_result.rtt_level);
                        /* Exit to Host */
                        ret_to_rec = false;
                } else {
                        rec->regs[0] = res.smc_result;

                        /*
                         * In case of an error, return to the Realm; the
                         * parent function calls advance_pc().
                         */
                        if (rec->regs[0] == RSI_SUCCESS) {
                                advance_pc();

                                /* Exit to Host */
                                rec->host_call = true;
                                rec_exit->exit_reason = RMI_EXIT_HOST_CALL;
                                ret_to_rec = false;
                        }
                }
                break;
        }
        default:
                rec->regs[0] = SMC_UNKNOWN;
                break;
        }

        if (rsi_handler_needs_fpu(function_id) == true) {
                if (restore_rec_simd_state == true) {
                        rec_simd_enable_restore(rec);
                } else {
                        simd_restore_ns_state();
                }
        } else if (rec_is_simd_allowed(rec)) {
                simd_enable(rec_simd_type(rec));
        }

        /* Log RSI call */
        RSI_LOG_EXIT(function_id, rec->regs, ret_to_rec);
        return ret_to_rec;
}

/*
 * Return 'true' if the RMM handled the exception,
 * 'false' to return to the Non-secure host.
 */
static bool handle_exception_sync(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        const unsigned long esr = read_esr_el2();

        switch (esr & MASK(ESR_EL2_EC)) {
        case ESR_EL2_EC_WFX:
                rec_exit->esr = esr & (MASK(ESR_EL2_EC) | ESR_EL2_WFx_TI_BIT);
                advance_pc();
                return false;
        case ESR_EL2_EC_HVC:
                realm_inject_undef_abort();
                return true;
        case ESR_EL2_EC_SMC:
                if (!handle_realm_rsi(rec, rec_exit)) {
                        return false;
                }
                /*
                 * Advance the PC.
                 * HCR_EL2.TSC traps execution of the SMC instruction.
                 * It is not a routing control for the SMC exception.
                 * Trap exceptions and SMC exceptions have different
                 * preferred return addresses.
                 */
                advance_pc();
                return true;
        case ESR_EL2_EC_SYSREG: {
                bool ret = handle_sysreg_access_trap(rec, rec_exit, esr);

                advance_pc();
                return ret;
        }
        case ESR_EL2_EC_INST_ABORT:
                return handle_instruction_abort(rec, rec_exit, esr);
        case ESR_EL2_EC_DATA_ABORT:
                return handle_data_abort(rec, rec_exit, esr);
        case ESR_EL2_EC_FPU:
                return handle_simd_exception(SIMD_FPU, rec);
        case ESR_EL2_EC_SVE:
                return handle_simd_exception(SIMD_SVE, rec);
        default:
                /*
                 * TODO: Check whether there are other exit reasons we could
                 * encounter here and handle them appropriately.
                 */
                break;
        }

        VERBOSE("Unhandled sync exit ESR: %08lx (EC: %lx ISS: %lx)\n",
                esr, EXTRACT(ESR_EL2_EC, esr), EXTRACT(ESR_EL2_ISS, esr));

        /*
         * Zero values in the esr, far and hpfar fields of the 'rec_exit'
         * structure are returned to the NS host.
         * The only information that may leak is that there was an
         * unhandled/unknown reason for the exception.
         */
        return false;
}

/*
 * Return 'true' if the RMM handled the exception, 'false' to return to the
 * Non-secure host.
 */
static bool handle_exception_serror_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        const unsigned long esr = read_esr_el2();

        if (esr & ESR_EL2_SERROR_IDS_BIT) {
                /*
                 * Implementation-defined content of the ESR.
                 */
                system_abort();
        }

        if ((esr & MASK(ESR_EL2_SERROR_DFSC)) != ESR_EL2_SERROR_DFSC_ASYNC) {
                /*
                 * Either an Uncategorized or a Reserved fault status code.
                 */
                system_abort();
        }

        switch (esr & MASK(ESR_EL2_SERROR_AET)) {
        case ESR_EL2_SERROR_AET_UEU:    /* Unrecoverable RAS Error */
        case ESR_EL2_SERROR_AET_UER:    /* Recoverable RAS Error */
                /*
                 * The abort is fatal to the current S/W. Inject the SError
                 * into the Realm so it can e.g. shut down gracefully or
                 * localize the problem to the specific EL0 application.
                 *
                 * Note: Consider shutting down the Realm here to avoid
                 * attacks from the host on unstable Realms.
                 */
                inject_serror(rec, esr);
                /*
                 * Fall through.
                 */
        case ESR_EL2_SERROR_AET_CE:     /* Corrected RAS Error */
        case ESR_EL2_SERROR_AET_UEO:    /* Restartable RAS Error */
                /*
                 * Report the exception to the host.
                 */
                rec_exit->esr = esr & ESR_SERROR_MASK;
                break;
        case ESR_EL2_SERROR_AET_UC:     /* Uncontainable RAS Error */
                system_abort();
                break;
        default:
                /*
                 * Unrecognized Asynchronous Error Type.
                 */
                assert(false);
        }

        return false;
}

static bool handle_exception_irq_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        (void)rec;

        rec_exit->exit_reason = RMI_EXIT_IRQ;

        /*
         * With the GIC, all virtual interrupt programming
         * must go via the NS hypervisor.
         */
        return false;
}

/* Returns 'true' when returning to the Realm (S) and 'false' when returning to NS */
bool handle_realm_exit(struct rec *rec, struct rmi_rec_exit *rec_exit, int exception)
{
        switch (exception) {
        case ARM_EXCEPTION_SYNC_LEL: {
                bool ret;

                /*
                 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
                 * information.
                 */
                rec_exit->exit_reason = RMI_EXIT_SYNC;
                ret = handle_exception_sync(rec, rec_exit);
                if (!ret) {
                        rec->last_run_info.esr = read_esr_el2();
                        rec->last_run_info.far = read_far_el2();
                        rec->last_run_info.hpfar = read_hpfar_el2();
                }
                return ret;

                /*
                 * TODO: Much more detailed handling of exit reasons.
                 */
        }
        case ARM_EXCEPTION_IRQ_LEL:
                return handle_exception_irq_lel(rec, rec_exit);
        case ARM_EXCEPTION_FIQ_LEL:
                rec_exit->exit_reason = RMI_EXIT_FIQ;
                break;
        case ARM_EXCEPTION_SERROR_LEL: {
                const unsigned long esr = read_esr_el2();
                bool ret;

                /*
                 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
                 * information.
                 */
                rec_exit->exit_reason = RMI_EXIT_SERROR;
                ret = handle_exception_serror_lel(rec, rec_exit);
                if (!ret) {
                        rec->last_run_info.esr = esr;
                        rec->last_run_info.far = read_far_el2();
                        rec->last_run_info.hpfar = read_hpfar_el2();
                }
                return ret;
        }
        default:
                INFO("Unrecognized exit reason: %d\n", exception);
                break;
        }

        return false;
}