/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <buffer.h>
#include <esr.h>
#include <exit.h>
#include <gic.h>
#include <granule.h>
#include <inject_exp.h>
#include <memory_alloc.h>
#include <psci.h>
#include <realm.h>
#include <rec.h>
#include <rsi-handler.h>
#include <rsi-logger.h>
#include <run.h>
#include <simd.h>
#include <smc-rmi.h>
#include <smc-rsi.h>
#include <status.h>
#include <sysreg_traps.h>
#include <table.h>

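/*
 * Low-level FPU save/restore helpers; declared here and defined outside
 * this translation unit.
 */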
void save_fpu_state(struct fpu_state *fpu);
void restore_fpu_state(struct fpu_state *fpu);

static void system_abort(void)
{
	/*
	 * TODO: report the abort to EL3.
	 * We need to establish the exact EL3 API first.
	 */
	assert(false);
}

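/*
 * If the Realm was executing in AArch32 state, clear the ISV bit in @esr,
 * as MMIO emulation of AArch32 reads/writes is not supported.
 * Returns 'true' if the abort was taken from AArch32.
 */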
static bool fixup_aarch32_data_abort(struct rec *rec, unsigned long *esr)
{
	unsigned long spsr = read_spsr_el2();

	(void)rec;

	if ((spsr & SPSR_EL2_nRW_AARCH32) != 0UL) {
		/*
		 * MMIO emulation of AArch32 reads/writes is not supported.
		 */
		*esr &= ~ESR_EL2_ABORT_ISV_BIT;
		return true;
	}
	return false;
}

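/*
 * Return the value the faulting instruction attempted to write, masked to
 * the access size. Rt == 31 encodes the zero register (xzr).
 */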
static unsigned long get_dabt_write_value(struct rec *rec, unsigned long esr)
{
	unsigned int rt = esr_srt(esr);

	/* Handle xzr */
	if (rt == 31U) {
		return 0UL;
	}
	return rec->regs[rt] & access_mask(esr);
}

/*
 * Returns 'true' if access from @rec to @addr is within the Protected IPA space.
 */
static bool access_in_rec_par(struct rec *rec, unsigned long addr)
{
	/*
	 * It is OK to check only the base address of the access because:
	 * - The Protected IPA space starts at address zero.
	 * - The IPA width is below 64 bits, therefore the access cannot
	 *   wrap around.
	 */
	return addr_in_rec_par(rec, addr);
}

/*
 * Returns 'true' if @ipa is in PAR and its RIPAS is EMPTY.
 *
 * @ipa must be aligned to the granule size.
 */
static bool ipa_is_empty(unsigned long ipa, struct rec *rec)
{
	struct s2_walk_result s2_walk;
	enum s2_walk_status walk_status;

	assert(GRANULE_ALIGNED(ipa));

	walk_status = realm_ipa_to_pa(rec, ipa, &s2_walk);

	if ((walk_status != WALK_INVALID_PARAMS) &&
	    (s2_walk.ripas_val == RIPAS_EMPTY)) {
		return true;
	}
	return false;
}

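/*
 * Returns 'true' if @fsc encodes a Synchronous External Abort, taken either
 * directly or on a translation table walk.
 */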
static bool fsc_is_external_abort(unsigned long fsc)
{
	if (fsc == ESR_EL2_ABORT_FSC_SEA) {
		return true;
	}

	if ((fsc >= ESR_EL2_ABORT_FSC_SEA_TTW_START) &&
	    (fsc <= ESR_EL2_ABORT_FSC_SEA_TTW_END)) {
		return true;
	}

	return false;
}

/*
 * Handles Data/Instruction Aborts at a lower EL with External Abort fault
 * status code (D/IFSC).
 * Returns 'true' if the exception is an external abort, in which case the
 * `rec_exit` structure has been populated; 'false' otherwise.
 */
static bool handle_sync_external_abort(struct rec *rec,
				       struct rmi_rec_exit *rec_exit,
				       unsigned long esr)
{
	unsigned long fsc = esr & MASK(ESR_EL2_ABORT_FSC);
	unsigned long set = esr & MASK(ESR_EL2_ABORT_SET);

	if (!fsc_is_external_abort(fsc)) {
		return false;
	}

	switch (set) {
	case ESR_EL2_ABORT_SET_UER:
		/*
		 * Recoverable SEA.
		 * Inject the synchronous abort into the Realm.
		 * Report the exception to the host.
		 */
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		/*
		 * Fall through.
		 */
	case ESR_EL2_ABORT_SET_UEO:
		/*
		 * Restartable SEA.
		 * Report the exception to the host.
		 * The REC restarts the same instruction.
		 */
		rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

		/*
		 * The value of HPFAR_EL2 is not provided to the host as
		 * it is undefined for external aborts.
		 *
		 * We also don't provide the content of FAR_EL2 because it
		 * has no practical value to the host without HPFAR_EL2.
		 */
		break;
	case ESR_EL2_ABORT_SET_UC:
		/*
		 * Uncontainable SEA.
		 * Fatal to the system.
		 */
		system_abort();
		break;
	default:
		assert(false);
	}

	return true;
}

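/*
 * Emulate a Stage 2 data abort to the NS host at the failing IPA, which is
 * taken from rec->regs[1] (the IPA argument of the interrupted RSI call).
 */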
void emulate_stage2_data_abort(struct rec *rec,
			       struct rmi_rec_exit *rec_exit,
			       unsigned long rtt_level)
{
	unsigned long fipa = rec->regs[1];

	assert(rtt_level <= RTT_PAGE_LEVEL);

	/*
	 * Set up the Exception Syndrome Register to emulate a real data
	 * abort and return to the NS host to handle it.
	 */
	rec_exit->esr = (ESR_EL2_EC_DATA_ABORT |
			(ESR_EL2_ABORT_FSC_TRANSLATION_FAULT_L0 + rtt_level));
	rec_exit->far = 0UL;
	rec_exit->hpfar = fipa >> HPFAR_EL2_FIPA_OFFSET;
	rec_exit->exit_reason = RMI_EXIT_SYNC;
}

/*
 * Returns 'true' if the abort is handled and the RMM should return to the Realm,
 * and returns 'false' if the exception should be reported to the NS host.
 */
static bool handle_data_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
			      unsigned long esr)
{
	unsigned long far = 0UL;
	unsigned long hpfar = read_hpfar_el2();
	unsigned long fipa = (hpfar & MASK(HPFAR_EL2_FIPA)) << HPFAR_EL2_FIPA_OFFSET;
	unsigned long write_val = 0UL;

	if (handle_sync_external_abort(rec, rec_exit, esr)) {
		/*
		 * All external aborts are immediately reported to the host.
		 */
		return false;
	}

	/*
	 * A memory access that crosses a page boundary may cause two aborts
	 * with `hpfar_el2` values referring to two consecutive pages.
	 *
	 * Inject the SEA and return to the Realm if the granule's RIPAS is
	 * EMPTY.
	 */
	if (ipa_is_empty(fipa, rec)) {
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		return true;
	}

	if (fixup_aarch32_data_abort(rec, &esr) ||
	    access_in_rec_par(rec, fipa)) {
		esr &= ESR_NONEMULATED_ABORT_MASK;
		goto end;
	}

	if (esr_is_write(esr)) {
		write_val = get_dabt_write_value(rec, esr);
	}

	far = read_far_el2() & ~GRANULE_MASK;
	esr &= ESR_EMULATED_ABORT_MASK;

end:
	rec_exit->esr = esr;
	rec_exit->far = far;
	rec_exit->hpfar = hpfar;
	rec_exit->gprs[0] = write_val;

	return false;
}

/*
 * Returns 'true' if the abort is handled and the RMM should return to the Realm,
 * and returns 'false' if the exception should be reported to the NS host.
 */
static bool handle_instruction_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
				     unsigned long esr)
{
	unsigned long fsc = esr & MASK(ESR_EL2_ABORT_FSC);
	unsigned long fsc_type = fsc & ~MASK(ESR_EL2_ABORT_FSC_LEVEL);
	unsigned long hpfar = read_hpfar_el2();
	unsigned long fipa = (hpfar & MASK(HPFAR_EL2_FIPA)) << HPFAR_EL2_FIPA_OFFSET;

	if (handle_sync_external_abort(rec, rec_exit, esr)) {
		/*
		 * All external aborts are immediately reported to the host.
		 */
		return false;
	}

	/*
	 * Inject the SEA and return to the Realm if:
	 * - The instruction abort is at an Unprotected IPA, or
	 * - The granule's RIPAS is EMPTY.
	 */
	if (!access_in_rec_par(rec, fipa) || ipa_is_empty(fipa, rec)) {
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		return true;
	}

	if (fsc_type != ESR_EL2_ABORT_FSC_TRANSLATION_FAULT) {
		unsigned long far = read_far_el2();

		/*
		 * TODO: Should this ever happen, or is it an indication of an
		 * internal consistency failure in the RMM which should lead
		 * to a panic instead?
		 */

		ERROR("Unhandled instruction abort:\n");
		ERROR("    FSC: %12s0x%02lx\n", " ", fsc);
		ERROR("    FAR: %16lx\n", far);
		ERROR("  HPFAR: %16lx\n", hpfar);
		return false;
	}

	rec_exit->hpfar = hpfar;
	rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

	return false;
}

/*
 * Handle FPU or SVE exceptions.
 * Returns: true if the exception is handled.
 */
static bool handle_simd_exception(simd_t exp_type, struct rec *rec)
{
	/*
	 * If the REC wants to use SVE and SVE is not enabled for this REC,
	 * then inject an undefined abort. This can happen when the CPU
	 * implements FEAT_SVE but the Realm didn't request this feature
	 * during creation.
	 */
	if ((exp_type == SIMD_SVE) && (rec_simd_type(rec) != SIMD_SVE)) {
		realm_inject_undef_abort();
		return true;
	}

	/* An FPU or SVE exception can happen only when the REC hasn't used SIMD */
	assert(rec_is_simd_allowed(rec) == false);

	/*
	 * Allow the REC to use SIMD. Save the NS SIMD state and restore the
	 * REC SIMD state from memory to registers.
	 */
	simd_save_ns_state();
	rec_simd_enable_restore(rec);

	/*
	 * Return 'true' indicating that this exception has been handled and
	 * execution can continue.
	 */
	return true;
}

/*
 * Return 'false' if no IRQ is pending,
 * return 'true' if there is an IRQ pending and the RMM needs to return
 * to the host.
 */
static bool check_pending_irq(void)
{
	unsigned long pending_irq;

	pending_irq = read_isr_el1();

	return (pending_irq != 0UL);
}

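/* Step ELR_EL2 past the trapped instruction (AArch64 instructions are 4 bytes). */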
static void advance_pc(void)
{
	unsigned long pc = read_elr_el2();

	write_elr_el2(pc + 4UL);
}

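/* Copy an SMC result into the Realm's x0-x3. */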
static void return_result_to_realm(struct rec *rec, struct smc_result result)
{
	rec->regs[0] = result.x[0];
	rec->regs[1] = result.x[1];
	rec->regs[2] = result.x[2];
	rec->regs[3] = result.x[3];
}

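/*
 * Returns 'true' for RSI handlers which use the FPU at REL2 (only when the
 * RMM is built with RMM_FPU_USE_AT_REL2).
 */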
static inline bool rsi_handler_needs_fpu(unsigned int id)
{
#ifdef RMM_FPU_USE_AT_REL2
	if ((id == SMC_RSI_ATTEST_TOKEN_CONTINUE) ||
	    (id == SMC_RSI_MEASUREMENT_EXTEND)) {
		return true;
	}
#endif
	return false;
}

/*
 * Return 'true' if execution should continue in the REC, otherwise return
 * 'false' to go back to the NS caller of REC.Enter.
 */
static bool handle_realm_rsi(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	bool ret_to_rec = true;	/* Return to Realm */
	unsigned int function_id = (unsigned int)rec->regs[0];
	bool restore_rec_simd_state = false;

	RSI_LOG_SET(rec->regs);

	/* Ignore the SVE hint bit until the RMM supports it */
	function_id &= ~MASK(SMC_SVE_HINT);

	if (rsi_handler_needs_fpu(function_id) == true) {
		/*
		 * The RSI handler uses the FPU at REL2, so actively save the
		 * REC SIMD state if the REC is using SIMD, or the NS SIMD
		 * state otherwise. Restore the same state before returning
		 * from this function.
		 */
		if (rec_is_simd_allowed(rec)) {
			rec_simd_save_disable(rec);
			restore_rec_simd_state = true;
		} else {
			simd_save_ns_state();
		}
	} else if (rec_is_simd_allowed(rec)) {
		/*
		 * If the REC is allowed to access SIMD, then we entered the
		 * RMM with SIMD traps disabled. Enable SIMD traps, as the
		 * RMM by default runs with SIMD traps enabled.
		 */
		simd_disable();
	}

	switch (function_id) {
	case SMCCC_VERSION:
		rec->regs[0] = SMCCC_VERSION_NUMBER;
		break;
	case SMC_RSI_ABI_VERSION:
		rec->regs[0] = handle_rsi_version();
		break;
	case SMC32_PSCI_FID_MIN ... SMC32_PSCI_FID_MAX:
	case SMC64_PSCI_FID_MIN ... SMC64_PSCI_FID_MAX: {
		struct psci_result res;

		res = psci_rsi(rec,
			       function_id,
			       rec->regs[1],
			       rec->regs[2],
			       rec->regs[3]);

		if (!rec->psci_info.pending) {
			rec->regs[0] = res.smc_res.x[0];
			rec->regs[1] = res.smc_res.x[1];
			rec->regs[2] = res.smc_res.x[2];
			rec->regs[3] = res.smc_res.x[3];
		}

		if (res.hvc_forward.forward_psci_call) {
			unsigned int i;

			rec_exit->exit_reason = RMI_EXIT_PSCI;
			rec_exit->gprs[0] = function_id;
			rec_exit->gprs[1] = res.hvc_forward.x1;
			rec_exit->gprs[2] = res.hvc_forward.x2;
			rec_exit->gprs[3] = res.hvc_forward.x3;

			for (i = 4U; i < REC_EXIT_NR_GPRS; i++) {
				rec_exit->gprs[i] = 0UL;
			}

			advance_pc();
			ret_to_rec = false;
		}
		break;
	}
	case SMC_RSI_ATTEST_TOKEN_INIT:
		rec->regs[0] = handle_rsi_attest_token_init(rec);
		break;
	case SMC_RSI_ATTEST_TOKEN_CONTINUE: {
		struct attest_result res;

		while (true) {
			/*
			 * Possible outcomes:
			 * if res.incomplete is true
			 *    if IRQ pending
			 *       check for pending IRQ and return to host
			 *    else
			 *       try a new iteration
			 * else
			 *    if RTT table walk has failed,
			 *       emulate data abort back to host
			 *    otherwise
			 *       return to realm because the token
			 *       creation is complete or input parameter
			 *       validation failed.
			 */
			handle_rsi_attest_token_continue(rec, &res);

			if (res.incomplete) {
				if (check_pending_irq()) {
					rec_exit->exit_reason = RMI_EXIT_IRQ;

					/* Copy the result to the REC prior to returning to the host */
					return_result_to_realm(rec, res.smc_res);
					advance_pc();

					/* Return to the NS host to handle the IRQ. */
					ret_to_rec = false;
					break;
				}
			} else {
				if (res.walk_result.abort) {
					emulate_stage2_data_abort(
						rec, rec_exit,
						res.walk_result.rtt_level);
					ret_to_rec = false;	/* Exit to Host */
					break;
				}

				/* Return to Realm */
				return_result_to_realm(rec, res.smc_res);
				break;
			}
		}
		break;
	}
	case SMC_RSI_MEASUREMENT_READ:
		rec->regs[0] = handle_rsi_read_measurement(rec);
		break;
	case SMC_RSI_MEASUREMENT_EXTEND:
		rec->regs[0] = handle_rsi_extend_measurement(rec);
		break;
	case SMC_RSI_REALM_CONFIG: {
		struct rsi_result res;

		res = handle_rsi_realm_config(rec);
		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			ret_to_rec = false;	/* Exit to Host */
		} else {
			/* Return to Realm */
			return_result_to_realm(rec, res.smc_res);
		}
		break;
	}
	case SMC_RSI_IPA_STATE_SET:
		if (handle_rsi_ipa_state_set(rec, rec_exit)) {
			rec->regs[0] = RSI_ERROR_INPUT;
		} else {
			advance_pc();
			ret_to_rec = false;	/* Return to Host */
		}
		break;
	case SMC_RSI_IPA_STATE_GET: {
		struct rsi_walk_smc_result res;

		res = handle_rsi_ipa_state_get(rec);
		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			/* Exit to Host */
			ret_to_rec = false;
		} else {
			/* Return to Realm */
			return_result_to_realm(rec, res.smc_res);
		}
		break;
	}
	case SMC_RSI_HOST_CALL: {
		struct rsi_result res;

		res = handle_rsi_host_call(rec, rec_exit);
		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			/* Exit to Host */
			ret_to_rec = false;
		} else {
			rec->regs[0] = res.smc_res.x[0];

			/*
			 * Return to the Realm in case of error;
			 * the parent function calls advance_pc().
			 */
			if (rec->regs[0] == RSI_SUCCESS) {
				advance_pc();

				/* Exit to Host */
				rec->host_call = true;
				rec_exit->exit_reason = RMI_EXIT_HOST_CALL;
				ret_to_rec = false;
			}
		}
		break;
	}
	default:
		rec->regs[0] = SMC_UNKNOWN;
		break;
	}

	if (rsi_handler_needs_fpu(function_id) == true) {
		if (restore_rec_simd_state == true) {
			rec_simd_enable_restore(rec);
		} else {
			simd_restore_ns_state();
		}
	} else if (rec_is_simd_allowed(rec)) {
		simd_enable(rec_simd_type(rec));
	}

	/* Log the RSI call */
	RSI_LOG_EXIT(function_id, rec->regs, ret_to_rec);
	return ret_to_rec;
}

/*
 * Return 'true' if the RMM handled the exception,
 * 'false' to return to the Non-secure host.
 */
static bool handle_exception_sync(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	const unsigned long esr = read_esr_el2();

	switch (esr & MASK(ESR_EL2_EC)) {
	case ESR_EL2_EC_WFX:
		rec_exit->esr = esr & (MASK(ESR_EL2_EC) | ESR_EL2_WFx_TI_BIT);
		advance_pc();
		return false;
	case ESR_EL2_EC_HVC:
		realm_inject_undef_abort();
		return true;
	case ESR_EL2_EC_SMC:
		if (!handle_realm_rsi(rec, rec_exit)) {
			return false;
		}
		/*
		 * Advance PC.
		 * HCR_EL2.TSC traps execution of the SMC instruction.
		 * It is not a routing control for the SMC exception.
		 * Trap exceptions and SMC exceptions have different
		 * preferred return addresses.
		 */
		advance_pc();
		return true;
	case ESR_EL2_EC_SYSREG: {
		bool ret = handle_sysreg_access_trap(rec, rec_exit, esr);

		advance_pc();
		return ret;
	}
	case ESR_EL2_EC_INST_ABORT:
		return handle_instruction_abort(rec, rec_exit, esr);
	case ESR_EL2_EC_DATA_ABORT:
		return handle_data_abort(rec, rec_exit, esr);
	case ESR_EL2_EC_FPU:
		return handle_simd_exception(SIMD_FPU, rec);
	case ESR_EL2_EC_SVE:
		return handle_simd_exception(SIMD_SVE, rec);
	default:
		/*
		 * TODO: Check if there are other exit reasons we could
		 * encounter here and handle them appropriately
		 */
		break;
	}

	VERBOSE("Unhandled sync exit ESR: %08lx (EC: %lx ISS: %lx)\n",
		esr, EXTRACT(ESR_EL2_EC, esr), EXTRACT(ESR_EL2_ISS, esr));

	/*
	 * Zero values in the esr, far and hpfar fields of the 'rec_exit'
	 * structure will be returned to the NS host.
	 * The only information that may leak is that there was some
	 * unhandled/unknown reason for the exception.
	 */
	return false;
}

/*
 * Return 'true' if the RMM handled the exception, 'false' to return to the
 * Non-secure host.
 */
static bool handle_exception_serror_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	const unsigned long esr = read_esr_el2();

	if ((esr & ESR_EL2_SERROR_IDS_BIT) != 0UL) {
		/*
		 * Implementation defined content of the esr.
		 */
		system_abort();
	}

	if ((esr & MASK(ESR_EL2_SERROR_DFSC)) != ESR_EL2_SERROR_DFSC_ASYNC) {
		/*
		 * Either Uncategorized or Reserved fault status code.
		 */
		system_abort();
	}

	switch (esr & MASK(ESR_EL2_SERROR_AET)) {
	case ESR_EL2_SERROR_AET_UEU:	/* Unrecoverable RAS Error */
	case ESR_EL2_SERROR_AET_UER:	/* Recoverable RAS Error */
		/*
		 * The abort is fatal to the current S/W. Inject the SError
		 * into the Realm so it can e.g. shut down gracefully or
		 * localize the problem at the specific EL0 application.
		 *
		 * Note: Consider shutting down the Realm here to avoid
		 * the host attacking unstable Realms.
		 */
		inject_serror(rec, esr);
		/*
		 * Fall through.
		 */
	case ESR_EL2_SERROR_AET_CE:	/* Corrected RAS Error */
	case ESR_EL2_SERROR_AET_UEO:	/* Restartable RAS Error */
		/*
		 * Report the exception to the host.
		 */
		rec_exit->esr = esr & ESR_SERROR_MASK;
		break;
	case ESR_EL2_SERROR_AET_UC:	/* Uncontainable RAS Error */
		system_abort();
		break;
	default:
		/*
		 * Unrecognized Asynchronous Error Type
		 */
		assert(false);
	}

	return false;
}

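/* A physical IRQ at a lower EL always causes an exit to the NS host. */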
static bool handle_exception_irq_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	(void)rec;

	rec_exit->exit_reason = RMI_EXIT_IRQ;

	/*
	 * With GIC all virtual interrupt programming
	 * must go via the NS hypervisor.
	 */
	return false;
}

/* Returns 'true' when returning to the Realm (S) and 'false' when returning to NS */
bool handle_realm_exit(struct rec *rec, struct rmi_rec_exit *rec_exit, int exception)
{
	switch (exception) {
	case ARM_EXCEPTION_SYNC_LEL: {
		bool ret;

		/*
		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
		 * information.
		 */
		rec_exit->exit_reason = RMI_EXIT_SYNC;
		ret = handle_exception_sync(rec, rec_exit);
		if (!ret) {
			rec->last_run_info.esr = read_esr_el2();
			rec->last_run_info.far = read_far_el2();
			rec->last_run_info.hpfar = read_hpfar_el2();
		}
		return ret;

		/*
		 * TODO: Much more detailed handling of exit reasons.
		 */
	}
	case ARM_EXCEPTION_IRQ_LEL:
		return handle_exception_irq_lel(rec, rec_exit);
	case ARM_EXCEPTION_FIQ_LEL:
		rec_exit->exit_reason = RMI_EXIT_FIQ;
		break;
	case ARM_EXCEPTION_SERROR_LEL: {
		const unsigned long esr = read_esr_el2();
		bool ret;

		/*
		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
		 * information.
		 */
		rec_exit->exit_reason = RMI_EXIT_SERROR;
		ret = handle_exception_serror_lel(rec, rec_exit);
		if (!ret) {
			rec->last_run_info.esr = esr;
			rec->last_run_info.far = read_far_el2();
			rec->last_run_info.hpfar = read_hpfar_el2();
		}
		return ret;
	}
	default:
		INFO("Unrecognized exit reason: %d\n", exception);
		break;
	}

	return false;
}