/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <stdnoreturn.h>

#include "hf/arch/barriers.h"
#include "hf/arch/init.h"
#include "hf/arch/mmu.h"
#include "hf/arch/plat/smc.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/panic.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

#include "debug_el1.h"
#include "feature_id.h"
#include "msr.h"
#include "perfmon.h"
#include "psci.h"
#include "psci_handler.h"
#include "smc.h"
#include "sysregs.h"

/**
 * Hypervisor Fault Address Register Non-Secure.
 */
#define HPFAR_EL2_NS (UINT64_C(0x1) << 63)

/**
 * Hypervisor Fault Address Register Faulting IPA.
 */
#define HPFAR_EL2_FIPA (UINT64_C(0xFFFFFFFFFF0))
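
/*
 * Layout note (an assumption based on the Armv8-A HPFAR_EL2 definition, not
 * stated elsewhere in this file): the FIPA field occupies HPFAR_EL2[43:4]
 * and holds bits [51:12] of the faulting IPA, which is why fault_info_init()
 * below shifts the masked value left by 8 to recover the page-aligned IPA.
 */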

/**
 * Gets the value to increment for the next PC.
 * The ESR encodes whether the instruction is 2 bytes or 4 bytes long.
 */
#define GET_NEXT_PC_INC(esr) (GET_ESR_IL(esr) ? 4 : 2)
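
/*
 * For example, per the Armv8-A ESR layout (where IL is bit 25): a trapped
 * 32-bit instruction reports IL == 1, so the PC advances by 4; a 16-bit T32
 * instruction reports IL == 0, so the PC advances by 2.
 */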

/**
 * The Client ID field within X7 for an SMC64 call.
 */
#define CLIENT_ID_MASK UINT64_C(0xffff)
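
/*
 * Illustrative example of how smc_forwarder() below applies this mask,
 * assuming the SMCCC convention that W7[15:0] carries the client ID and the
 * upper bits the Secure OS ID: if a VM with ID 0x2 passes arg7 == 0x00010005,
 * the forwarded value becomes (0x2 | (0x00010005 & ~0xffff)) == 0x00010002,
 * i.e. the Secure OS ID is preserved while the client ID is overwritten.
 */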

/**
 * Returns a reference to the currently executing vCPU.
 */
static struct vcpu *current(void)
{
	return (struct vcpu *)read_msr(tpidr_el2);
}

/**
 * Saves the state of per-vCPU peripherals, such as the virtual timer, and
 * informs the arch-independent sections that registers have been saved.
 */
void complete_saving_state(struct vcpu *vcpu)
{
	vcpu->regs.peripherals.cntv_cval_el0 = read_msr(cntv_cval_el0);
	vcpu->regs.peripherals.cntv_ctl_el0 = read_msr(cntv_ctl_el0);

	api_regs_state_saved(vcpu);

	/*
	 * If switching away from the primary, copy the current EL0 virtual
	 * timer registers to the corresponding EL2 physical timer registers.
	 * This is used to emulate the virtual timer for the primary in case
	 * it should fire while the secondary is running.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * Clear timer control register before copying compare value,
		 * to avoid a spurious timer interrupt. This could be a
		 * problem if the interrupt is configured as edge-triggered,
		 * as it would then be latched in.
		 */
		write_msr(cnthp_ctl_el2, 0);
		write_msr(cnthp_cval_el2, read_msr(cntv_cval_el0));
		write_msr(cnthp_ctl_el2, read_msr(cntv_ctl_el0));
	}
}

/**
 * Restores the state of per-vCPU peripherals, such as the virtual timer.
 */
void begin_restoring_state(struct vcpu *vcpu)
{
	/*
	 * Clear timer control register before restoring compare value, to
	 * avoid a spurious timer interrupt. This could be a problem if the
	 * interrupt is configured as edge-triggered, as it would then be
	 * latched in.
	 */
	write_msr(cntv_ctl_el0, 0);
	write_msr(cntv_cval_el0, vcpu->regs.peripherals.cntv_cval_el0);
	write_msr(cntv_ctl_el0, vcpu->regs.peripherals.cntv_ctl_el0);

	/*
	 * If we are switching (back) to the primary, disable the EL2 physical
	 * timer which was being used to emulate the EL0 virtual timer, as the
	 * virtual timer is now running for the primary again.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		write_msr(cnthp_ctl_el2, 0);
		write_msr(cnthp_cval_el2, 0);
	}
}
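
/*
 * Illustrative timeline for the timer emulation above (a sketch, not part of
 * the original comments): (1) the primary programs its EL0 virtual timer;
 * (2) on a switch to a secondary, complete_saving_state() mirrors
 * cntv_cval_el0/cntv_ctl_el0 into cnthp_cval_el2/cnthp_ctl_el2, so the EL2
 * physical timer fires if the primary's deadline passes while the secondary
 * runs; (3) on switching back, begin_restoring_state() disables the EL2
 * physical timer and the real virtual timer takes over again.
 */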

/**
 * Invalidate all stage 1 TLB entries on the current (physical) CPU for the
 * current VMID.
 */
static void invalidate_vm_tlb(void)
{
	/*
	 * Ensure that the last VTTBR write has taken effect so we invalidate
	 * the right set of TLB entries.
	 */
	isb();

	__asm__ volatile("tlbi vmalle1");

	/*
	 * Ensure that no instructions are fetched for the VM until after the
	 * TLB invalidation has taken effect.
	 */
	isb();

	/*
	 * Ensure that no data reads or writes for the VM happen until after
	 * the TLB invalidation has taken effect. Non-shareable is enough
	 * because the TLB is local to the CPU.
	 */
	dsb(nsh);
}

/**
 * Invalidates the TLB if a different vCPU is being run than the last vCPU of
 * the same VM which was run on the current pCPU.
 *
 * This is necessary because VMs may (contrary to the architecture
 * specification) use inconsistent ASIDs across vCPUs. c.f. KVM's similar
 * workaround:
 * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94d0e5980d6791b9
 */
void maybe_invalidate_tlb(struct vcpu *vcpu)
{
	size_t current_cpu_index = cpu_index(vcpu->cpu);
	ffa_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);

	if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
	    new_vcpu_index) {
		/*
		 * The vCPU has changed since the last time this VM was run on
		 * this pCPU, so we need to invalidate the TLB.
		 */
		invalidate_vm_tlb();

		/* Record the fact that this vCPU is now running on this CPU. */
		vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] =
			new_vcpu_index;
	}
}
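
/*
 * Example scenario: vCPU 0 of a VM runs on pCPU 3, so last_vcpu_on_cpu[3]
 * becomes 0. If the scheduler later places vCPU 1 of the same VM on pCPU 3,
 * the indices differ, the stage 1 TLB entries for that VMID are invalidated,
 * and last_vcpu_on_cpu[3] becomes 1. Running vCPU 1 on pCPU 3 again would
 * then skip the invalidation.
 */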

noreturn void irq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("IRQ from current exception level.");
}

noreturn void fiq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("FIQ from current exception level.");
}

noreturn void serr_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("SError from current exception level.");
}

noreturn void sync_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	uintreg_t esr = read_msr(esr_el2);
	uintreg_t ec = GET_ESR_EC(esr);

	(void)spsr;

	switch (ec) {
	case EC_DATA_ABORT_SAME_EL:
		if (!(esr & (1U << 10))) { /* Check FnV bit. */
			dlog_error(
				"Data abort: pc=%#x, esr=%#x, ec=%#x, "
				"far=%#x\n",
				elr, esr, ec, read_msr(far_el2));
		} else {
			dlog_error(
				"Data abort: pc=%#x, esr=%#x, ec=%#x, "
				"far=invalid\n",
				elr, esr, ec);
		}

		break;

	default:
		dlog_error(
			"Unknown current sync exception pc=%#x, esr=%#x, "
			"ec=%#x\n",
			elr, esr, ec);
		break;
	}

	panic("EL2 exception");
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_interrupt(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->lazy.hcr_el2 |= HCR_EL2_VI;
	} else {
		r->lazy.hcr_el2 &= ~HCR_EL2_VI;
	}
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register.
 */
static void set_virtual_interrupt_current(bool enable)
{
	uintreg_t hcr_el2 = read_msr(hcr_el2);

	if (enable) {
		hcr_el2 |= HCR_EL2_VI;
	} else {
		hcr_el2 &= ~HCR_EL2_VI;
	}
	write_msr(hcr_el2, hcr_el2);
}

#if SECURE_WORLD == 1
static bool sp_boot_next(struct vcpu *current, struct vcpu **next,
			 struct ffa_value *ffa_ret)
{
	struct vm_locked current_vm_locked;
	struct vm *vm_next = NULL;
	bool ret = false;

	/*
	 * If the VM hasn't been initialized, initialize it and traverse the
	 * boot list following the "next_boot" field in the VM structure.
	 * Once all the SPs have been booted (when "next_boot" is NULL),
	 * return execution to the NWd.
	 */
	current_vm_locked = vm_lock(current->vm);
	if (current_vm_locked.vm->initialized == false) {
		current_vm_locked.vm->initialized = true;
		dlog_verbose("Initialized VM: %#x, boot_order: %u\n",
			     current_vm_locked.vm->id,
			     current_vm_locked.vm->boot_order);

		if (current_vm_locked.vm->next_boot != NULL) {
			current->state = VCPU_STATE_BLOCKED_MAILBOX;
			vm_next = current_vm_locked.vm->next_boot;
			CHECK(vm_next->initialized == false);
			*next = vm_get_vcpu(vm_next, vcpu_index(current));
			arch_regs_reset(*next);
			(*next)->cpu = current->cpu;
			(*next)->state = VCPU_STATE_RUNNING;
			(*next)->regs_available = false;

			*ffa_ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
			ret = true;
			goto out;
		}

		dlog_verbose("Finished initializing all VMs.\n");
	}

out:
	vm_unlock(&current_vm_locked);
	return ret;
}
#endif

/**
 * Checks whether to block an SMC being forwarded from a VM.
 */
static bool smc_is_blocked(const struct vm *vm, uint32_t func)
{
	bool block_by_default = !vm->smc_whitelist.permissive;

	for (size_t i = 0; i < vm->smc_whitelist.smc_count; ++i) {
		if (func == vm->smc_whitelist.smcs[i]) {
			return false;
		}
	}

	dlog_notice("SMC %#010x attempted from VM %#x, blocked=%u\n", func,
		    vm->id, block_by_default);

	/* Access is still allowed in permissive mode. */
	return block_by_default;
}
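
/*
 * For example, with the behaviour implemented above: under a permissive
 * manifest, block_by_default is false, so an SMC missing from the whitelist
 * is logged via dlog_notice() (blocked=0) but still forwarded; under a
 * non-permissive manifest the same call would be blocked.
 */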

/**
 * Applies SMC access control according to the manifest and forwards the call
 * if access is granted.
 */
static void smc_forwarder(const struct vm *vm, struct ffa_value *args)
{
	struct ffa_value ret;
	uint32_t client_id = vm->id;
	uintreg_t arg7 = args->arg7;

	if (smc_is_blocked(vm, args->func)) {
		args->func = SMCCC_ERROR_UNKNOWN;
		return;
	}

	/*
	 * Set the Client ID but keep the existing Secure OS ID and anything
	 * else (currently unspecified) that the client may have passed in the
	 * upper bits.
	 */
	args->arg7 = client_id | (arg7 & ~CLIENT_ID_MASK);
	ret = smc_forward(args->func, args->arg1, args->arg2, args->arg3,
			  args->arg4, args->arg5, args->arg6, args->arg7);

	/*
	 * Preserve the value passed by the caller, rather than the generated
	 * client_id. Note that this would also overwrite any return value
	 * that may be in x7, but the SMCs that we are forwarding are legacy
	 * calls from before SMCCC 1.2 so won't have more than 4 return values
	 * anyway.
	 */
	ret.arg7 = arg7;

	plat_smc_post_forward(*args, &ret);

	*args = ret;
}

/**
 * In the normal world, ffa_handler is always called from the virtual FF-A
 * instance (from a VM in EL1). In the secure world, ffa_handler may be called
 * from the virtual (a secure partition in S-EL1) or physical FF-A instance
 * (from the normal world via EL3). The function returns true when the call is
 * handled. The *next pointer is updated to the next vCPU to run, which might
 * be the 'other world' vCPU if the call originated from the virtual FF-A
 * instance and has to be forwarded down to EL3, or left as is to resume the
 * current vCPU.
 */
static bool ffa_handler(struct ffa_value *args, struct vcpu *current,
			struct vcpu **next)
{
	uint32_t func = args->func & ~SMCCC_CONVENTION_MASK;
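	/*
	 * Note (an assumption about SMCCC_CONVENTION_MASK, which is defined
	 * elsewhere): clearing the mask above strips the SMC32/SMC64
	 * calling-convention bit of the function ID, so the SMC64 variant of
	 * an FF-A ABI (e.g. 0xC4000071) would fall through to the same case
	 * label as its SMC32 counterpart (0x84000071).
	 */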
378
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100379 /*
380 * NOTE: When adding new methods to this handler update
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100381 * api_ffa_features accordingly.
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100382 */
Andrew Walbrane7ad3c02019-12-24 17:03:04 +0000383 switch (func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100384 case FFA_VERSION_32:
385 *args = api_ffa_version(args->arg1);
Andrew Walbran7d28d9a2019-08-30 16:24:58 +0100386 return true;
Fuad Tabbae4efcc32020-07-16 15:37:27 +0100387 case FFA_PARTITION_INFO_GET_32: {
388 struct ffa_uuid uuid;
389
390 ffa_uuid_init(args->arg1, args->arg2, args->arg3, args->arg4,
391 &uuid);
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200392 *args = api_ffa_partition_info_get(current, &uuid);
Fuad Tabbae4efcc32020-07-16 15:37:27 +0100393 return true;
394 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100395 case FFA_ID_GET_32:
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200396 *args = api_ffa_id_get(current);
Andrew Walbrand230f662019-10-07 18:03:36 +0100397 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100398 case FFA_FEATURES_32:
399 *args = api_ffa_features(args->arg1);
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100400 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100401 case FFA_RX_RELEASE_32:
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200402 *args = api_ffa_rx_release(current, next);
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000403 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100404 case FFA_RXTX_MAP_32:
405 *args = api_ffa_rxtx_map(ipa_init(args->arg1),
406 ipa_init(args->arg2), args->arg3,
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200407 current, next);
Andrew Walbranbfffb0f2019-11-05 14:02:34 +0000408 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100409 case FFA_YIELD_32:
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200410 *args = api_yield(current, next);
Andrew Walbran7d28d9a2019-08-30 16:24:58 +0100411 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100412 case FFA_MSG_SEND_32:
413 *args = api_ffa_msg_send(
414 ffa_msg_send_sender(*args),
415 ffa_msg_send_receiver(*args), ffa_msg_send_size(*args),
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200416 ffa_msg_send_attributes(*args), current, next);
Andrew Walbran7d28d9a2019-08-30 16:24:58 +0100417 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100418 case FFA_MSG_WAIT_32:
J-Alvesb37fd082020-10-22 12:29:21 +0100419#if SECURE_WORLD == 1
420 if (sp_boot_next(current, next, args)) {
421 return true;
422 }
423#endif
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200424 *args = api_ffa_msg_recv(true, current, next);
Andrew Walbran0de4f162019-09-03 16:44:20 +0100425 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100426 case FFA_MSG_POLL_32:
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200427 *args = api_ffa_msg_recv(false, current, next);
Andrew Walbran7d28d9a2019-08-30 16:24:58 +0100428 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100429 case FFA_RUN_32:
430 *args = api_ffa_run(ffa_vm_id(*args), ffa_vcpu_index(*args),
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200431 current, next);
Andrew Walbranf0c314d2019-10-02 14:24:26 +0100432 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100433 case FFA_MEM_DONATE_32:
434 case FFA_MEM_LEND_32:
435 case FFA_MEM_SHARE_32:
436 *args = api_ffa_mem_send(func, args->arg1, args->arg2,
437 ipa_init(args->arg3), args->arg4,
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200438 current);
Andrew Walbran82d6d152019-12-24 15:02:06 +0000439 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100440 case FFA_MEM_RETRIEVE_REQ_32:
441 *args = api_ffa_mem_retrieve_req(args->arg1, args->arg2,
442 ipa_init(args->arg3),
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200443 args->arg4, current);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000444 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100445 case FFA_MEM_RELINQUISH_32:
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200446 *args = api_ffa_mem_relinquish(current);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000447 return true;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100448 case FFA_MEM_RECLAIM_32:
449 *args = api_ffa_mem_reclaim(
Andrew Walbran1bbe9402020-04-30 16:47:13 +0100450 ffa_assemble_handle(args->arg1, args->arg2), args->arg3,
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200451 current);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000452 return true;
Andrew Walbranca808b12020-05-15 17:22:28 +0100453 case FFA_MEM_FRAG_RX_32:
454 *args = api_ffa_mem_frag_rx(ffa_frag_handle(*args), args->arg3,
455 (args->arg4 >> 16) & 0xffff,
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200456 current);
Andrew Walbranca808b12020-05-15 17:22:28 +0100457 return true;
458 case FFA_MEM_FRAG_TX_32:
459 *args = api_ffa_mem_frag_tx(ffa_frag_handle(*args), args->arg3,
460 (args->arg4 >> 16) & 0xffff,
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200461 current);
Andrew Walbranca808b12020-05-15 17:22:28 +0100462 return true;
Olivier Deprezee9d6a92019-11-26 09:14:11 +0000463 case FFA_MSG_SEND_DIRECT_REQ_32:
464 *args = api_ffa_msg_send_direct_req(
465 ffa_msg_send_sender(*args),
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200466 ffa_msg_send_receiver(*args), *args, current, next);
Olivier Deprezee9d6a92019-11-26 09:14:11 +0000467 return true;
468 case FFA_MSG_SEND_DIRECT_RESP_32:
469 *args = api_ffa_msg_send_direct_resp(
470 ffa_msg_send_sender(*args),
Olivier Deprezf33a6c72020-06-09 18:28:45 +0200471 ffa_msg_send_receiver(*args), *args, current, next);
Olivier Deprezee9d6a92019-11-26 09:14:11 +0000472 return true;
Max Shvetsov40108e72020-08-27 12:39:50 +0100473 case FFA_SECONDARY_EP_REGISTER_32:
474 *args = api_ffa_secondary_ep_register(ipa_init(args->arg1),
475 current);
476 return true;
Andrew Walbranf0c314d2019-10-02 14:24:26 +0100477 }
Andrew Walbran7d28d9a2019-08-30 16:24:58 +0100478
479 return false;
480}

/**
 * Set or clear VI bit according to pending interrupts.
 */
static void update_vi(struct vcpu *next)
{
	if (next == NULL) {
		/*
		 * Not switching vCPUs, set the bit for the current vCPU
		 * directly in the register.
		 */
		struct vcpu *vcpu = current();

		sl_lock(&vcpu->lock);
		set_virtual_interrupt_current(
			vcpu->interrupts.enabled_and_pending_count > 0);
		sl_unlock(&vcpu->lock);
	} else if (vm_id_is_current_world(next->vm->id)) {
		/*
		 * About to switch vCPUs, set the bit for the vCPU to which we
		 * are switching in the saved copy of the register.
		 */
		sl_lock(&next->lock);
		set_virtual_interrupt(
			&next->regs,
			next->interrupts.enabled_and_pending_count > 0);
		sl_unlock(&next->lock);
	}
}

/**
 * Handles PSCI and FF-A calls and writes the return value back to the
 * registers of the vCPU. This is shared between smc_handler and hvc_handler.
 *
 * Returns true if the call was handled.
 */
static bool hvc_smc_handler(struct ffa_value args, struct vcpu *vcpu,
			    struct vcpu **next)
{
	/* Do not expect PSCI calls emitted from within the secure world. */
#if SECURE_WORLD == 0
	if (psci_handler(vcpu, args.func, args.arg1, args.arg2, args.arg3,
			 &vcpu->regs.r[0], next)) {
		return true;
	}
#endif

	if (ffa_handler(&args, vcpu, next)) {
		arch_regs_set_retval(&vcpu->regs, args);
		update_vi(*next);
		return true;
	}

	return false;
}

/**
 * Processes SMC instruction calls.
 */
static struct vcpu *smc_handler(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	switch (args.func & ~SMCCC_CONVENTION_MASK) {
	case HF_DEBUG_LOG:
		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
		return NULL;
	}

	smc_forwarder(vcpu->vm, &args);
	arch_regs_set_retval(&vcpu->regs, args);
	return NULL;
}

#if SECURE_WORLD == 1

/**
 * Called from other_world_loop on return from an SMC.
 * Processes SMC calls originating from the NWd.
 */
struct vcpu *smc_handler_from_nwd(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	/*
	 * If the SMC emitted by the normal world is not handled in the secure
	 * world then return an error stating that such an ABI is not
	 * supported. Only FF-A calls are supported. We cannot return
	 * SMCCC_ERROR_UNKNOWN directly because the SPMD SMC handler would not
	 * recognize it as a standard FF-A call returning from the SPMC.
	 */
	arch_regs_set_retval(&vcpu->regs, ffa_error(FFA_NOT_SUPPORTED));

	return NULL;
}

#endif

/*
 * Exception vector offsets.
 * See Arm Architecture Reference Manual Armv8-A, D1.10.2.
 */

/**
 * Offset for synchronous exceptions at current EL with SPx.
 */
#define OFFSET_CURRENT_SPX UINT64_C(0x200)

/**
 * Offset for synchronous exceptions at lower EL using AArch64.
 */
#define OFFSET_LOWER_EL_64 UINT64_C(0x400)

/**
 * Offset for synchronous exceptions at lower EL using AArch32.
 */
#define OFFSET_LOWER_EL_32 UINT64_C(0x600)

/**
 * Returns the address for the exception handler at EL1.
 */
static uintreg_t get_el1_exception_handler_addr(const struct vcpu *vcpu)
{
	uintreg_t base_addr = read_msr(vbar_el1);
	uintreg_t pe_mode = vcpu->regs.spsr & PSR_PE_MODE_MASK;
	bool is_arch32 = vcpu->regs.spsr & PSR_ARCH_MODE_32;

	if (pe_mode == PSR_PE_MODE_EL0T) {
		if (is_arch32) {
			base_addr += OFFSET_LOWER_EL_32;
		} else {
			base_addr += OFFSET_LOWER_EL_64;
		}
	} else {
		CHECK(!is_arch32);
		base_addr += OFFSET_CURRENT_SPX;
	}

	return base_addr;
}

/**
 * Injects an exception with the specified Exception Syndrome Register value
 * into EL1.
 *
 * NOTE: This function assumes that the lazy registers haven't been saved, and
 * writes to the lazy registers of the CPU directly instead of the vCPU.
 */
static void inject_el1_exception(struct vcpu *vcpu, uintreg_t esr_el1_value,
				 uintreg_t far_el1_value)
{
	uintreg_t handler_address = get_el1_exception_handler_addr(vcpu);

	/* Update the CPU state to inject the exception. */
	write_msr(esr_el1, esr_el1_value);
	write_msr(far_el1, far_el1_value);
	write_msr(elr_el1, vcpu->regs.pc);
	write_msr(spsr_el1, vcpu->regs.spsr);

	/*
	 * Mask (disable) interrupts and run in EL1h mode.
	 * EL1h mode is used because, by default, taking an exception selects
	 * the stack pointer for the target exception level. The software can
	 * change that later in the handler if needed.
	 */
	vcpu->regs.spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;

	/* Transfer control to the exception handler. */
	vcpu->regs.pc = handler_address;
}

/**
 * Injects a Data Abort exception (same exception level).
 */
static void inject_el1_data_abort_exception(struct vcpu *vcpu,
					    uintreg_t esr_el2,
					    uintreg_t far_el2)
{
	/*
	 * ISS encoding remains the same, but the EC is changed to reflect
	 * where the exception came from.
	 * See Arm Architecture Reference Manual Armv8-A, pages D13-2943/2982.
	 */
	uintreg_t esr_el1_value = GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) |
				  (EC_DATA_ABORT_SAME_EL << ESR_EC_OFFSET);

	dlog_notice("Injecting Data Abort exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el2);
}
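
/*
 * Illustrative breakdown of the ESR rewrite above (assuming the Armv8-A
 * layout, where EC occupies ESR[31:26] and EC_DATA_ABORT_SAME_EL encodes
 * 0b100101): the ISS and IL fields from ESR_EL2 are kept verbatim, while the
 * EC is replaced so that, from EL1's point of view, the abort appears to
 * have been taken at its own exception level.
 */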

/**
 * Injects an Instruction Abort exception (same exception level).
 */
static void inject_el1_instruction_abort_exception(struct vcpu *vcpu,
						   uintreg_t esr_el2,
						   uintreg_t far_el2)
{
	/*
	 * ISS encoding remains the same, but the EC is changed to reflect
	 * where the exception came from.
	 * See Arm Architecture Reference Manual Armv8-A, pages D13-2941/2980.
	 */
	uintreg_t esr_el1_value =
		GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) |
		(EC_INSTRUCTION_ABORT_SAME_EL << ESR_EC_OFFSET);

	dlog_notice("Injecting Instruction Abort exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el2);
}

/**
 * Injects an exception with an unknown reason into EL1.
 */
static void inject_el1_unknown_exception(struct vcpu *vcpu, uintreg_t esr_el2)
{
	uintreg_t esr_el1_value =
		GET_ESR_IL(esr_el2) | (EC_UNKNOWN << ESR_EC_OFFSET);

	/*
	 * The value of the far_el2 register is UNKNOWN in this case,
	 * therefore, don't propagate it to avoid leaking sensitive
	 * information.
	 */
	uintreg_t far_el1_value = 0;
	char *direction_str;

	direction_str = ISS_IS_READ(esr_el2) ? "read" : "write";
	dlog_notice(
		"Trapped access to system register %s: op0=%d, op1=%d, "
		"crn=%d, crm=%d, op2=%d, rt=%d.\n",
		direction_str, GET_ISS_OP0(esr_el2), GET_ISS_OP1(esr_el2),
		GET_ISS_CRN(esr_el2), GET_ISS_CRM(esr_el2),
		GET_ISS_OP2(esr_el2), GET_ISS_RT(esr_el2));

	dlog_notice("Injecting Unknown Reason exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el1_value);
}

static struct vcpu *hvc_handler(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	switch (args.func) {
	case HF_MAILBOX_WRITABLE_GET:
		vcpu->regs.r[0] = api_mailbox_writable_get(vcpu);
		break;

	case HF_MAILBOX_WAITER_GET:
		vcpu->regs.r[0] = api_mailbox_waiter_get(args.arg1, vcpu);
		break;

	case HF_INTERRUPT_ENABLE:
		vcpu->regs.r[0] =
			api_interrupt_enable(args.arg1, args.arg2, vcpu);
		break;

	case HF_INTERRUPT_GET:
		vcpu->regs.r[0] = api_interrupt_get(vcpu);
		break;

	case HF_INTERRUPT_INJECT:
		vcpu->regs.r[0] = api_interrupt_inject(args.arg1, args.arg2,
						       args.arg3, vcpu, &next);
		break;

	case HF_DEBUG_LOG:
		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
		break;

	default:
		vcpu->regs.r[0] = SMCCC_ERROR_UNKNOWN;
	}

	update_vi(next);

	return next;
}

struct vcpu *irq_lower(void)
{
	/*
	 * Switch back to primary VM, interrupts will be handled there.
	 *
	 * If the VM has aborted, this vCPU will be aborted when the scheduler
	 * tries to run it again. This means the interrupt will not be delayed
	 * by the aborted VM.
	 *
	 * TODO: Only switch when the interrupt isn't for the current VM.
	 */
	return api_preempt(current());
}

struct vcpu *fiq_lower(void)
{
	return irq_lower();
}

noreturn struct vcpu *serr_lower(void)
{
	/*
	 * SError exceptions should be isolated and handled by the responsible
	 * VM/exception level. Getting here indicates a bug, that isolation is
	 * not working, or a processor that does not support ARMv8.2-IESB, in
	 * which case Hafnium routes SError exceptions to EL2 (here).
	 */
	panic("SError from a lower exception level.");
}

/**
 * Initialises a fault info structure. It assumes that an FnV bit exists at
 * bit offset 10 of the ESR, and that it is only valid when the bottom 6 bits
 * of the ESR (the fault status code) are 010000; this is the case for both
 * instruction and data aborts, but not necessarily for other exception
 * reasons.
 */
static struct vcpu_fault_info fault_info_init(uintreg_t esr,
					      const struct vcpu *vcpu,
					      uint32_t mode)
{
	uint32_t fsc = esr & 0x3f;
	struct vcpu_fault_info r;
	uint64_t hpfar_el2_val;
	uint64_t hpfar_el2_fipa;

	r.mode = mode;
	r.pc = va_init(vcpu->regs.pc);

	/* Get Hypervisor IPA Fault Address value. */
	hpfar_el2_val = read_msr(hpfar_el2);

	/* Extract Faulting IPA. */
	hpfar_el2_fipa = (hpfar_el2_val & HPFAR_EL2_FIPA) << 8;

#if SECURE_WORLD == 1

	/*
	 * Determine if the faulting IPA targets NS space.
	 * At NS-EL2 hpfar_el2 bit 63 is RES0. At S-EL2, this bit determines if
	 * the faulting Stage-1 address output is a secure or non-secure IPA.
	 */
	if ((hpfar_el2_val & HPFAR_EL2_NS) != 0) {
		r.mode |= MM_MODE_NS;
	}

#endif

	/*
	 * Check the FnV bit, which is only valid if dfsc/ifsc is 010000. It
	 * indicates that we cannot rely on far_el2.
	 */
	if (fsc == 0x10 && esr & (1U << 10)) {
		r.vaddr = va_init(0);
		r.ipaddr = ipa_init(hpfar_el2_fipa);
	} else {
		r.vaddr = va_init(read_msr(far_el2));
		r.ipaddr = ipa_init(hpfar_el2_fipa |
				    (read_msr(far_el2) & (PAGE_SIZE - 1)));
	}

	return r;
}
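
/*
 * Worked example with illustrative values (assuming 4 KiB pages): if
 * hpfar_el2 reads 0x8030, masking with HPFAR_EL2_FIPA and shifting left by 8
 * gives a faulting page of 0x803000; with far_el2 == 0x7123, the reported
 * ipaddr becomes 0x803000 | 0x123 == 0x803123. When FnV is set (and fsc is
 * 0b010000), far_el2 is unusable, so only the page-aligned 0x803000 is
 * reported.
 */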

struct vcpu *sync_lower_exception(uintreg_t esr, uintreg_t far)
{
	struct vcpu *vcpu = current();
	struct vcpu_fault_info info;
	struct vcpu *new_vcpu;
	uintreg_t ec = GET_ESR_EC(esr);

	switch (ec) {
	case EC_WFI_WFE:
		/* Skip the instruction. */
		vcpu->regs.pc += GET_NEXT_PC_INC(esr);
		/* Check TI bit of ISS, 0 = WFI, 1 = WFE. */
		if (esr & 1) {
			/* WFE */
			/*
			 * TODO: consider giving the scheduler more context,
			 * somehow.
			 */
			api_yield(vcpu, &new_vcpu);
			return new_vcpu;
		}
		/* WFI */
		return api_wait_for_interrupt(vcpu);

	case EC_DATA_ABORT_LOWER_EL:
		info = fault_info_init(
			esr, vcpu, (esr & (1U << 6)) ? MM_MODE_W : MM_MODE_R);
		if (vcpu_handle_page_fault(vcpu, &info)) {
			return NULL;
		}
		/* Inform the EL1 of the data abort. */
		inject_el1_data_abort_exception(vcpu, esr, far);

		/* Schedule the same VM to continue running. */
		return NULL;

	case EC_INSTRUCTION_ABORT_LOWER_EL:
		info = fault_info_init(esr, vcpu, MM_MODE_X);
		if (vcpu_handle_page_fault(vcpu, &info)) {
			return NULL;
		}
		/* Inform the EL1 of the instruction abort. */
		inject_el1_instruction_abort_exception(vcpu, esr, far);

		/* Schedule the same VM to continue running. */
		return NULL;

	case EC_HVC:
		return hvc_handler(vcpu);

	case EC_SMC: {
		uintreg_t smc_pc = vcpu->regs.pc;
		struct vcpu *next = smc_handler(vcpu);

		/* Skip the SMC instruction. */
		vcpu->regs.pc = smc_pc + GET_NEXT_PC_INC(esr);

		return next;
	}

	case EC_MSR:
		/*
		 * NOTE: This should never be reached because it goes through a
		 * separate path handled by handle_system_register_access().
		 */
		panic("Handled by handle_system_register_access().");

	default:
		dlog_notice(
			"Unknown lower sync exception pc=%#x, esr=%#x, "
			"ec=%#x\n",
			vcpu->regs.pc, esr, ec);
		break;
	}

	/*
	 * The exception wasn't handled. Inject it into the VM to give it a
	 * chance to handle it as an unknown exception.
	 */
	inject_el1_unknown_exception(vcpu, esr);

	/* Schedule the same VM to continue running. */
	return NULL;
}

/**
 * Handles EC = 011000, MSR, MRS instruction traps.
 * Accesses that cannot be fulfilled inject an unknown-reason exception into
 * the VM; otherwise the trapped instruction is emulated and skipped.
 */
void handle_system_register_access(uintreg_t esr_el2)
{
	struct vcpu *vcpu = current();
	ffa_vm_id_t vm_id = vcpu->vm->id;
	uintreg_t ec = GET_ESR_EC(esr_el2);

	CHECK(ec == EC_MSR);
	/*
	 * Handle accesses to debug and performance monitor registers.
	 * Inject an exception for unhandled/unsupported registers.
	 */
	if (debug_el1_is_register_access(esr_el2)) {
		if (!debug_el1_process_access(vcpu, vm_id, esr_el2)) {
			inject_el1_unknown_exception(vcpu, esr_el2);
			return;
		}
	} else if (perfmon_is_register_access(esr_el2)) {
		if (!perfmon_process_access(vcpu, vm_id, esr_el2)) {
			inject_el1_unknown_exception(vcpu, esr_el2);
			return;
		}
	} else if (feature_id_is_register_access(esr_el2)) {
		if (!feature_id_process_access(vcpu, esr_el2)) {
			inject_el1_unknown_exception(vcpu, esr_el2);
			return;
		}
	} else {
		inject_el1_unknown_exception(vcpu, esr_el2);
		return;
	}

	/* Instruction was fulfilled. Skip it and run the next one. */
	vcpu->regs.pc += GET_NEXT_PC_INC(esr_el2);
}