/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <buffer.h>
#include <debug.h>
#include <esr.h>
#include <exit.h>
#include <gic.h>
#include <granule.h>
#include <inject_exp.h>
#include <memory_alloc.h>
#include <psci.h>
#include <realm.h>
#include <realm_attest.h>
#include <rec.h>
#include <rsi-config.h>
#include <rsi-handler.h>
#include <rsi-host-call.h>
#include <rsi-logger.h>
#include <rsi-memory.h>
#include <rsi-walk.h>
#include <run.h>
#include <simd.h>
#include <smc-rmi.h>
#include <smc-rsi.h>
#include <status.h>
#include <sysreg_traps.h>
#include <table.h>

void save_fpu_state(struct fpu_state *fpu);
void restore_fpu_state(struct fpu_state *fpu);

static void system_abort(void)
{
        /*
         * TODO: report the abort to EL3.
         * We need to establish the exact EL3 API first.
         */
        assert(false);
}

static bool fixup_aarch32_data_abort(struct rec *rec, unsigned long *esr)
{
        unsigned long spsr = read_spsr_el2();

        if ((spsr & SPSR_EL2_nRW_AARCH32) != 0UL) {
                /*
                 * MMIO emulation of AArch32 reads/writes is not supported.
                 */
                *esr &= ~ESR_EL2_ABORT_ISV_BIT;
                return true;
        }
        return false;
}
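
/*
 * Note: with ESR_EL2.ISV cleared, the ISS carries no valid instruction
 * syndrome, so the host cannot emulate the AArch32 access and has to treat
 * the abort as non-emulatable.
 */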

static unsigned long get_dabt_write_value(struct rec *rec, unsigned long esr)
{
        unsigned int rt = esr_srt(esr);

        /* Handle xzr */
        if (rt == 31U) {
                return 0UL;
        }
        return rec->regs[rt] & access_mask(esr);
}
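
/*
 * Worked example (illustrative, not from the source): a Realm store
 * "str w5, [x9]" to an emulatable IPA traps with ESR_EL2.ISV == 1,
 * SRT == 5 and SAS == 0b10 (32-bit access); esr_srt() selects
 * rec->regs[5] and access_mask() truncates it to 32 bits, which is the
 * value the host later sees in rec_exit->gprs[0].
 */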

/*
 * Returns 'true' if the access from @rec to @addr is within the Protected IPA
 * space.
 */
static bool access_in_rec_par(struct rec *rec, unsigned long addr)
{
        /*
         * It is OK to check only the base address of the access because:
         * - The Protected IPA space starts at address zero.
         * - The IPA width is below 64 bits, therefore the access cannot
         *   wrap around.
         */
        return addr_in_rec_par(rec, addr);
}

/*
 * Returns 'true' if @ipa is in PAR and its RIPAS is 'empty'.
 *
 * @ipa must be aligned to the granule size.
 */
static bool ipa_is_empty(unsigned long ipa, struct rec *rec)
{
        struct s2_walk_result s2_walk;
        enum s2_walk_status walk_status;

        assert(GRANULE_ALIGNED(ipa));

        walk_status = realm_ipa_to_pa(rec, ipa, &s2_walk);

        if ((walk_status != WALK_INVALID_PARAMS) &&
            (s2_walk.ripas == RIPAS_EMPTY)) {
                return true;
        }
        return false;
}

static bool fsc_is_external_abort(unsigned long fsc)
{
        if (fsc == ESR_EL2_ABORT_FSC_SEA) {
                return true;
        }

        if ((fsc >= ESR_EL2_ABORT_FSC_SEA_TTW_START) &&
            (fsc <= ESR_EL2_ABORT_FSC_SEA_TTW_END)) {
                return true;
        }

        return false;
}
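
/*
 * Informative: per the Arm architecture, DFSC/IFSC 0b010000 encodes a
 * synchronous external abort not on a translation table walk, while the
 * ESR_EL2_ABORT_FSC_SEA_TTW_START..END range covers synchronous external
 * aborts on a translation table walk, one code per translation level.
 */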

/*
 * Handles Data/Instruction Aborts at a lower EL with an External Abort fault
 * status code (D/IFSC).
 * Returns 'true' if the exception is an external abort and the 'rec_exit'
 * structure is populated, 'false' otherwise.
 */
static bool handle_sync_external_abort(struct rec *rec,
                                       struct rmi_rec_exit *rec_exit,
                                       unsigned long esr)
{
        unsigned long fsc = esr & MASK(ESR_EL2_ABORT_FSC);
        unsigned long set = esr & MASK(ESR_EL2_ABORT_SET);

        if (!fsc_is_external_abort(fsc)) {
                return false;
        }

        switch (set) {
        case ESR_EL2_ABORT_SET_UER:
                /*
                 * The recoverable SEA.
                 * Inject the synchronous abort into the Realm.
                 * Report the exception to the host.
                 */
                inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
                /*
                 * Fall through.
                 */
        case ESR_EL2_ABORT_SET_UEO:
                /*
                 * The restartable SEA.
                 * Report the exception to the host.
                 * The REC restarts the same instruction.
                 */
                rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

                /*
                 * The value of HPFAR_EL2 is not provided to the host as
                 * it is undefined for external aborts.
                 *
                 * We also don't provide the content of FAR_EL2 because it
                 * has no practical value to the host without the HPFAR_EL2.
                 */
                break;
        case ESR_EL2_ABORT_SET_UC:
                /*
                 * The uncontainable SEA.
                 * Fatal to the system.
                 */
                system_abort();
                break;
        default:
                assert(false);
        }

        return true;
}

void emulate_stage2_data_abort(struct rec *rec,
                               struct rmi_rec_exit *rec_exit,
                               unsigned long rtt_level)
{
        unsigned long fipa = rec->regs[1];

        assert(rtt_level <= RTT_PAGE_LEVEL);

        /*
         * Set up the Exception Syndrome Register to emulate a real data abort
         * and return to the NS host to handle it.
         */
        rec_exit->esr = (ESR_EL2_EC_DATA_ABORT |
                        (ESR_EL2_ABORT_FSC_TRANSLATION_FAULT_L0 + rtt_level));
        rec_exit->far = 0UL;
        rec_exit->hpfar = fipa >> HPFAR_EL2_FIPA_OFFSET;
        rec_exit->exit_reason = RMI_EXIT_SYNC;
}
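
/*
 * Example (illustrative): for a walk that stopped at RTT level 2 with
 * fipa == 0x80001000, the host observes exit_reason == RMI_EXIT_SYNC,
 * an ESR with EC == Data Abort and FSC == translation fault level 2,
 * far == 0 and hpfar == 0x80001000 >> HPFAR_EL2_FIPA_OFFSET, i.e. it
 * mirrors what a real stage 2 translation fault at that IPA would report.
 */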

/*
 * Returns 'true' if the abort is handled and the RMM should return to the Realm,
 * and returns 'false' if the exception should be reported to the NS host.
 */
static bool handle_data_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
                              unsigned long esr)
{
        unsigned long far = 0UL;
        unsigned long hpfar = read_hpfar_el2();
        unsigned long fipa = (hpfar & MASK(HPFAR_EL2_FIPA)) << HPFAR_EL2_FIPA_OFFSET;
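        /*
         * Note (informative): HPFAR_EL2.FIPA holds the page number of the
         * faulting IPA starting at register bit 4, so masking and shifting
         * left by HPFAR_EL2_FIPA_OFFSET reconstructs the page-aligned IPA;
         * e.g. a fault at IPA 0x80001234 yields fipa == 0x80001000.
         */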
        unsigned long write_val = 0UL;

        if (handle_sync_external_abort(rec, rec_exit, esr)) {
                /*
                 * All external aborts are immediately reported to the host.
                 */
                return false;
        }

        /*
         * A memory access that crosses a page boundary may cause two aborts
         * with 'hpfar_el2' values referring to two consecutive pages.
         *
         * Insert the SEA and return to the Realm if the granule's RIPAS is
         * EMPTY.
         */
        if (ipa_is_empty(fipa, rec)) {
                inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
                return true;
        }

        if (fixup_aarch32_data_abort(rec, &esr) ||
            access_in_rec_par(rec, fipa)) {
                esr &= ESR_NONEMULATED_ABORT_MASK;
                goto end;
        }

        if (esr_is_write(esr)) {
                write_val = get_dabt_write_value(rec, esr);
        }

        far = read_far_el2() & ~GRANULE_MASK;
        esr &= ESR_EMULATED_ABORT_MASK;

end:
        rec_exit->esr = esr;
        rec_exit->far = far;
        rec_exit->hpfar = hpfar;
        rec_exit->gprs[0] = write_val;

        return false;
}

/*
 * Returns 'true' if the abort is handled and the RMM should return to the Realm,
 * and returns 'false' if the exception should be reported to the NS host.
 */
static bool handle_instruction_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
                                     unsigned long esr)
{
        unsigned long fsc = esr & MASK(ESR_EL2_ABORT_FSC);
        unsigned long fsc_type = fsc & ~MASK(ESR_EL2_ABORT_FSC_LEVEL);
        unsigned long hpfar = read_hpfar_el2();
        unsigned long fipa = (hpfar & MASK(HPFAR_EL2_FIPA)) << HPFAR_EL2_FIPA_OFFSET;

        if (handle_sync_external_abort(rec, rec_exit, esr)) {
                /*
                 * All external aborts are immediately reported to the host.
                 */
                return false;
        }

        /*
         * Insert the SEA and return to the Realm if:
         * - The instruction abort is at an Unprotected IPA, or
         * - The granule's RIPAS is EMPTY.
         */
        if (!access_in_rec_par(rec, fipa) || ipa_is_empty(fipa, rec)) {
                inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
                return true;
        }

        if (fsc_type != ESR_EL2_ABORT_FSC_TRANSLATION_FAULT) {
                unsigned long far = read_far_el2();

                /*
                 * TODO: Should this ever happen, or is it an indication of an
                 * internal consistency failure in the RMM which should lead
                 * to a panic instead?
                 */

                ERROR("Unhandled instruction abort:\n");
                ERROR("    FSC: %12s0x%02lx\n", " ", fsc);
                ERROR("    FAR: %16lx\n", far);
                ERROR("  HPFAR: %16lx\n", hpfar);
                return false;
        }

        rec_exit->hpfar = hpfar;
        rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

        return false;
}

/*
 * Handle FPU or SVE exceptions.
 * Returns: true if the exception is handled.
 */
static bool handle_simd_exception(simd_t exp_type, struct rec *rec)
{
        /*
         * If the REC wants to use SVE and SVE is not enabled for this REC,
         * then inject an undefined abort. This can happen when the CPU
         * implements FEAT_SVE but the Realm didn't request this feature
         * during creation.
         */
        if (exp_type == SIMD_SVE && rec_simd_type(rec) != SIMD_SVE) {
                realm_inject_undef_abort();
                return true;
        }

        /* An FPU or SVE exception can happen only when the REC hasn't used SIMD */
        assert(rec_is_simd_allowed(rec) == false);

        /*
         * Allow the REC to use SIMD. Save the NS SIMD state and restore the
         * REC SIMD state from memory to registers.
         */
        simd_save_ns_state();
        rec_simd_enable_restore(rec);

        /*
         * Return 'true' indicating that this exception has been handled and
         * execution can continue.
         */
        return true;
}
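
/*
 * Note: the complementary switch, saving the REC's live SIMD state and
 * restoring the NS state, happens on the exit path back to the host, so this
 * lazy scheme only charges the save/restore cost to Realms that actually use
 * the FPU or SVE.
 */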

/*
 * Returns 'false' if no IRQ is pending,
 * 'true' if there is an IRQ pending and we need to return to the host.
 */
static bool check_pending_irq(void)
{
        unsigned long pending_irq;

        pending_irq = read_isr_el1();

        return (pending_irq != 0UL);
}
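
/*
 * Note: read at EL2, ISR_EL1 reports the pending state of the physical
 * IRQ, FIQ and SError interrupts, which is exactly the condition that
 * requires handing control back to the NS host.
 */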

static void advance_pc(void)
{
        unsigned long pc = read_elr_el2();

        write_elr_el2(pc + 4UL);
}

static void return_result_to_realm(struct rec *rec, struct smc_result result)
{
        rec->regs[0] = result.x[0];
        rec->regs[1] = result.x[1];
        rec->regs[2] = result.x[2];
        rec->regs[3] = result.x[3];
}

static inline bool rsi_handler_needs_fpu(unsigned int id)
{
#ifdef RMM_FPU_USE_AT_REL2
        if (id == SMC_RSI_ATTEST_TOKEN_CONTINUE ||
            id == SMC_RSI_MEASUREMENT_EXTEND) {
                return true;
        }
#endif
        return false;
}
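
/*
 * Rationale (assumption, not stated in the source): when RMM_FPU_USE_AT_REL2
 * is defined, the attestation and measurement handlers may hash data using a
 * crypto library that touches FPU/SIMD registers at REL2, so the caller must
 * first save whichever SIMD context (REC or NS) is currently live.
 */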

/*
 * Return 'true' if execution should continue in the REC, otherwise return
 * 'false' to go back to the NS caller of REC.Enter.
 */
static bool handle_realm_rsi(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        bool ret_to_rec = true;	/* Return to Realm */
        unsigned int function_id = (unsigned int)rec->regs[0];
        bool restore_rec_simd_state = false;

        RSI_LOG_SET(rec->regs);

        /* Ignore the SVE hint bit until the RMM supports it */
        function_id &= ~MASK(SMC_SVE_HINT);

        if (rsi_handler_needs_fpu(function_id) == true) {
                /*
                 * The RSI handler uses the FPU at REL2, so actively save
                 * whichever SIMD state is live: the REC state if the REC is
                 * using SIMD, the NS state otherwise. The same state is
                 * restored before returning from this function.
                 */
                if (rec_is_simd_allowed(rec)) {
                        rec_simd_save_disable(rec);
                        restore_rec_simd_state = true;
                } else {
                        simd_save_ns_state();
                }
        } else if (rec_is_simd_allowed(rec)) {
                /*
                 * If the REC is allowed to access SIMD, we entered the RMM
                 * with SIMD traps disabled. Re-enable them, as the RMM by
                 * default runs with SIMD traps enabled.
                 */
                simd_disable();
        }

        switch (function_id) {
        case SMCCC_VERSION:
                rec->regs[0] = SMCCC_VERSION_NUMBER;
                break;
        case SMC_RSI_ABI_VERSION:
                rec->regs[0] = system_rsi_abi_version();
                break;
        case SMC32_PSCI_FID_MIN ... SMC32_PSCI_FID_MAX:
        case SMC64_PSCI_FID_MIN ... SMC64_PSCI_FID_MAX: {
                struct psci_result res;

                res = psci_rsi(rec,
                               function_id,
                               rec->regs[1],
                               rec->regs[2],
                               rec->regs[3]);

                if (!rec->psci_info.pending) {
                        rec->regs[0] = res.smc_res.x[0];
                        rec->regs[1] = res.smc_res.x[1];
                        rec->regs[2] = res.smc_res.x[2];
                        rec->regs[3] = res.smc_res.x[3];
                }

                if (res.hvc_forward.forward_psci_call) {
                        unsigned int i;

                        rec_exit->exit_reason = RMI_EXIT_PSCI;
                        rec_exit->gprs[0] = function_id;
                        rec_exit->gprs[1] = res.hvc_forward.x1;
                        rec_exit->gprs[2] = res.hvc_forward.x2;
                        rec_exit->gprs[3] = res.hvc_forward.x3;

                        for (i = 4U; i < REC_EXIT_NR_GPRS; i++) {
                                rec_exit->gprs[i] = 0UL;
                        }

                        advance_pc();
                        ret_to_rec = false;
                }
                break;
        }
        case SMC_RSI_ATTEST_TOKEN_INIT:
                rec->regs[0] = handle_rsi_attest_token_init(rec);
                break;
        case SMC_RSI_ATTEST_TOKEN_CONTINUE: {
                struct attest_result res;

                while (true) {
                        /*
                         * Possible outcomes:
                         * if res.incomplete is true
                         *   if IRQ pending
                         *     check for pending IRQ and return to host
                         *   else try a new iteration
                         * else
                         *   if RTT table walk has failed,
                         *     emulate data abort back to host
                         *   otherwise
                         *     return to realm because the token creation is
                         *     complete or input parameter validation failed.
                         */
                        handle_rsi_attest_token_continue(rec, &res);

                        if (res.incomplete) {
                                if (check_pending_irq()) {
                                        rec_exit->exit_reason = RMI_EXIT_IRQ;

                                        /* Copy the result to the REC prior to returning to the host */
                                        return_result_to_realm(rec, res.smc_res);
                                        advance_pc();

                                        /* Return to NS host to handle IRQ. */
                                        ret_to_rec = false;
                                        break;
                                }
                        } else {
                                if (res.walk_result.abort) {
                                        emulate_stage2_data_abort(
                                                rec, rec_exit,
                                                res.walk_result.rtt_level);
                                        ret_to_rec = false;	/* Exit to Host */
                                        break;
                                }

                                /* Return to Realm */
                                return_result_to_realm(rec, res.smc_res);
                                break;
                        }
                }
                break;
        }
        case SMC_RSI_MEASUREMENT_READ:
                rec->regs[0] = handle_rsi_read_measurement(rec);
                break;
        case SMC_RSI_MEASUREMENT_EXTEND:
                rec->regs[0] = handle_rsi_extend_measurement(rec);
                break;
        case SMC_RSI_REALM_CONFIG: {
                struct rsi_walk_smc_result res;

                res = handle_rsi_realm_config(rec);
                if (res.walk_result.abort) {
                        emulate_stage2_data_abort(rec, rec_exit,
                                                  res.walk_result.rtt_level);
                        ret_to_rec = false;	/* Exit to Host */
                } else {
                        /* Return to Realm */
                        return_result_to_realm(rec, res.smc_res);
                }
                break;
        }
        case SMC_RSI_IPA_STATE_SET:
                if (handle_rsi_ipa_state_set(rec, rec_exit)) {
                        rec->regs[0] = RSI_ERROR_INPUT;
                } else {
                        advance_pc();
                        ret_to_rec = false;	/* Exit to Host */
                }
                break;
        case SMC_RSI_IPA_STATE_GET: {
                struct rsi_walk_smc_result res;

                res = handle_rsi_ipa_state_get(rec);
                if (res.walk_result.abort) {
                        emulate_stage2_data_abort(rec, rec_exit,
                                                  res.walk_result.rtt_level);
                        /* Exit to Host */
                        ret_to_rec = false;
                } else {
                        /* Return to Realm */
                        return_result_to_realm(rec, res.smc_res);
                }
                break;
        }
        case SMC_RSI_HOST_CALL: {
                struct rsi_host_call_result res;

                res = handle_rsi_host_call(rec, rec_exit);

                if (res.walk_result.abort) {
                        emulate_stage2_data_abort(rec, rec_exit,
                                                  res.walk_result.rtt_level);
                        /* Exit to Host */
                        ret_to_rec = false;
                } else {
                        rec->regs[0] = res.smc_result;

                        /*
                         * Return to the Realm in case of error;
                         * the parent function calls advance_pc().
                         */
                        if (rec->regs[0] == RSI_SUCCESS) {
                                advance_pc();

                                /* Exit to Host */
                                rec->host_call = true;
                                rec_exit->exit_reason = RMI_EXIT_HOST_CALL;
                                ret_to_rec = false;
                        }
                }
                break;
        }
        default:
                rec->regs[0] = SMC_UNKNOWN;
                break;
        }

        if (rsi_handler_needs_fpu(function_id) == true) {
                if (restore_rec_simd_state == true) {
                        rec_simd_enable_restore(rec);
                } else {
                        simd_restore_ns_state();
                }
        } else if (rec_is_simd_allowed(rec)) {
                simd_enable(rec_simd_type(rec));
        }

        /* Log RSI call */
        RSI_LOG_EXIT(function_id, rec->regs, ret_to_rec);
        return ret_to_rec;
}

/*
 * Return 'true' if the RMM handled the exception,
 * 'false' to return to the Non-secure host.
 */
static bool handle_exception_sync(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        const unsigned long esr = read_esr_el2();

        switch (esr & MASK(ESR_EL2_EC)) {
        case ESR_EL2_EC_WFX:
                rec_exit->esr = esr & (MASK(ESR_EL2_EC) | ESR_EL2_WFx_TI_BIT);
                advance_pc();
                return false;
        case ESR_EL2_EC_HVC:
                realm_inject_undef_abort();
                return true;
        case ESR_EL2_EC_SMC:
                if (!handle_realm_rsi(rec, rec_exit)) {
                        return false;
                }
                /*
                 * Advance PC.
                 * HCR_EL2.TSC traps execution of the SMC instruction.
                 * It is not a routing control for the SMC exception.
                 * Trap exceptions and SMC exceptions have different
                 * preferred return addresses.
                 */
                advance_pc();
                return true;
        case ESR_EL2_EC_SYSREG: {
                bool ret = handle_sysreg_access_trap(rec, rec_exit, esr);

                advance_pc();
                return ret;
        }
        case ESR_EL2_EC_INST_ABORT:
                return handle_instruction_abort(rec, rec_exit, esr);
        case ESR_EL2_EC_DATA_ABORT:
                return handle_data_abort(rec, rec_exit, esr);
        case ESR_EL2_EC_FPU:
                return handle_simd_exception(SIMD_FPU, rec);
        case ESR_EL2_EC_SVE:
                return handle_simd_exception(SIMD_SVE, rec);
        default:
                /*
                 * TODO: Check if there are other exit reasons we could
                 * encounter here and handle them appropriately.
                 */
                break;
        }

        VERBOSE("Unhandled sync exit ESR: %08lx (EC: %lx ISS: %lx)\n",
                esr, EXTRACT(ESR_EL2_EC, esr), EXTRACT(ESR_EL2_ISS, esr));

        /*
         * Zero values in the esr, far & hpfar fields of the 'rec_exit'
         * structure will be returned to the NS host.
         * The only information that may leak is when there was
         * some unhandled/unknown reason for the exception.
         */
        return false;
}

/*
 * Return 'true' if the RMM handled the exception, 'false' to return to the
 * Non-secure host.
 */
static bool handle_exception_serror_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        const unsigned long esr = read_esr_el2();

        if (esr & ESR_EL2_SERROR_IDS_BIT) {
                /*
                 * Implementation defined content of the ESR.
                 */
                system_abort();
        }

        if ((esr & MASK(ESR_EL2_SERROR_DFSC)) != ESR_EL2_SERROR_DFSC_ASYNC) {
                /*
                 * Either an Uncategorized or a Reserved fault status code.
                 */
                system_abort();
        }

        switch (esr & MASK(ESR_EL2_SERROR_AET)) {
        case ESR_EL2_SERROR_AET_UEU:	/* Unrecoverable RAS Error */
        case ESR_EL2_SERROR_AET_UER:	/* Recoverable RAS Error */
                /*
                 * The abort is fatal to the current S/W. Inject the SError into
                 * the Realm so it can e.g. shut down gracefully or localize the
                 * problem at the specific EL0 application.
                 *
                 * Note: Consider shutting down the Realm here to avoid
                 * the host attacking unstable Realms.
                 */
                inject_serror(rec, esr);
                /*
                 * Fall through.
                 */
        case ESR_EL2_SERROR_AET_CE:	/* Corrected RAS Error */
        case ESR_EL2_SERROR_AET_UEO:	/* Restartable RAS Error */
                /*
                 * Report the exception to the host.
                 */
                rec_exit->esr = esr & ESR_SERROR_MASK;
                break;
        case ESR_EL2_SERROR_AET_UC:	/* Uncontainable RAS Error */
                system_abort();
                break;
        default:
                /*
                 * Unrecognized Asynchronous Error Type.
                 */
                assert(false);
        }

        return false;
}

static bool handle_exception_irq_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
        (void)rec;

        rec_exit->exit_reason = RMI_EXIT_IRQ;

        /*
         * With the GIC, all virtual interrupt programming
         * must go via the NS hypervisor.
         */
        return false;
}
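
/*
 * Informative: the host fields the physical interrupt and, if it is meant
 * for the Realm, injects it as a virtual interrupt through the GIC list
 * registers in the REC run structure on the next REC entry.
 */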

/* Returns 'true' when returning to the Realm (S) and 'false' when to NS */
bool handle_realm_exit(struct rec *rec, struct rmi_rec_exit *rec_exit, int exception)
{
        switch (exception) {
        case ARM_EXCEPTION_SYNC_LEL: {
                bool ret;

                /*
                 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
                 * information.
                 */
                rec_exit->exit_reason = RMI_EXIT_SYNC;
                ret = handle_exception_sync(rec, rec_exit);
                if (!ret) {
                        rec->last_run_info.esr = read_esr_el2();
                        rec->last_run_info.far = read_far_el2();
                        rec->last_run_info.hpfar = read_hpfar_el2();
                }
                return ret;

                /*
                 * TODO: Much more detailed handling of exit reasons.
                 */
        }
        case ARM_EXCEPTION_IRQ_LEL:
                return handle_exception_irq_lel(rec, rec_exit);
        case ARM_EXCEPTION_FIQ_LEL:
                rec_exit->exit_reason = RMI_EXIT_FIQ;
                break;
        case ARM_EXCEPTION_SERROR_LEL: {
                const unsigned long esr = read_esr_el2();
                bool ret;

                /*
                 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
                 * information.
                 */
                rec_exit->exit_reason = RMI_EXIT_SERROR;
                ret = handle_exception_serror_lel(rec, rec_exit);
                if (!ret) {
                        rec->last_run_info.esr = esr;
                        rec->last_run_info.far = read_far_el2();
                        rec->last_run_info.hpfar = read_hpfar_el2();
                }
                return ret;
        }
        default:
                INFO("Unrecognized exit reason: %d\n", exception);
                break;
        }

        return false;
}