/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <stdnoreturn.h>

#include "hf/arch/barriers.h"
#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"
#include "hf/arch/init.h"
#include "hf/arch/memcpy_trapped.h"
#include "hf/arch/mmu.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/plat/ffa/indirect_messaging.h"
#include "hf/arch/plat/smc.h"
#include "hf/arch/timer.h"
#include "hf/arch/vmid_base.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/hf_ipi.h"
#include "hf/panic.h"
#include "hf/plat/interrupts.h"
#include "hf/timer_mgmt.h"
#include "hf/vm.h"
#include "hf/vm_ids.h"

#include "vmapi/hf/call.h"

#include "debug_el1.h"
#include "el1_physical_timer.h"
#include "feature_id.h"
#include "perfmon.h"
#include "psci.h"
#include "psci_handler.h"
#include "smc.h"
#include "sysregs.h"
#include "sysregs_defs.h"

/**
 * Hypervisor Fault Address Register Non-Secure.
 */
#define HPFAR_EL2_NS (UINT64_C(0x1) << 63)

/**
 * Hypervisor Fault Address Register Faulting IPA.
 */
#define HPFAR_EL2_FIPA (UINT64_C(0xFFFFFFFFFF0))
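
/*
 * FIPA occupies HPFAR_EL2[43:4] and holds bits [51:12] of the faulting IPA,
 * so the fault address is typically recovered as
 * (hpfar_el2 & HPFAR_EL2_FIPA) << 8.
 */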

/**
 * Gets the value to increment for the next PC.
 * The ESR encodes whether the instruction is 2 bytes or 4 bytes long.
 */
#define GET_NEXT_PC_INC(esr) (GET_ESR_IL(esr) ? 4 : 2)
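
/*
 * For example, a trapped 16-bit T32 instruction reports ESR_ELx.IL == 0 and
 * advances the PC by 2, while all A64 instructions report IL == 1 and
 * advance it by 4.
 */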
/**
 * The Client ID field within X7 for an SMC64 call.
 */
#define CLIENT_ID_MASK UINT64_C(0xffff)

/**
 * Identifies SPMD-specific framework messages. See section 18.2 of the
 * FF-A v1.2 specification.
 */
enum ffa_spmd_framework_msg_func {
	SPMD_FRAMEWORK_MSG_PSCI_REQ = 0,
	SPMD_FRAMEWORK_MSG_PSCI_RESP = 2,

	SPMD_FRAMEWORK_MSG_FFA_VERSION_REQ = 8,
	SPMD_FRAMEWORK_MSG_FFA_VERSION_RESP = 9,
};

/**
 * Returns a reference to the currently executing vCPU.
 */
static struct vcpu *current(void)
{
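	/*
	 * The hypervisor keeps a pointer to the running vCPU in tpidr_el2
	 * and updates it on every vCPU switch, so reading the register back
	 * recovers the current vCPU.
	 */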
	// NOLINTNEXTLINE(performance-no-int-to-ptr)
	return (struct vcpu *)read_msr(tpidr_el2);
}

/**
 * Saves the state of per-vCPU peripherals, such as the arch timer, and
 * informs the arch-independent sections that registers have been saved.
 */
void complete_saving_state(struct vcpu *vcpu)
{
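	/*
	 * Snapshot the vCPU's EL1 arch timer state, and let
	 * timer_vcpu_manage update the per-CPU timer tracking for this
	 * vCPU, before reporting the registers as saved.
	 */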
	host_timer_save_arch_timer(&vcpu->regs.arch_timer);

	timer_vcpu_manage(vcpu);
	api_regs_state_saved(vcpu);

	/*
	 * Since we are switching away from the current vCPU, disable the
	 * host physical timer for now. If necessary, the host timer will be
	 * reconfigured at the appropriate time to track the vCPU's timer
	 * deadline.
	 */
	host_timer_disable();
}

/**
 * Restores the state of per-vCPU peripherals, such as the arch timer.
 */
void begin_restoring_state(struct vcpu *vcpu)
{
	/*
	 * If a vCPU's timer expired while it was de-scheduled, the SPMC will
	 * inject the virtual timer interrupt before resuming the vCPU. If
	 * not, the timer state is still live and the host timer needs to be
	 * configured to track it again.
	 */
	if (arch_timer_enabled(&vcpu->regs) &&
	    (arch_timer_remaining_ns(&vcpu->regs) != 0)) {
		host_timer_track_deadline(&vcpu->regs.arch_timer);
	}
}

/**
 * Invalidates all stage 1 TLB entries on the current (physical) CPU for the
 * current VMID.
 */
static void invalidate_vm_tlb(void)
{
	/*
	 * Ensure that the last VTTBR write has taken effect so we invalidate
	 * the right set of TLB entries.
	 */
	isb();

	tlbi(vmalle1);

	/*
	 * Ensure that no instructions are fetched for the VM until after the
	 * TLB invalidation has taken effect.
	 */
	isb();

	/*
	 * Ensure that no data reads or writes for the VM happen until after
	 * the TLB invalidation has taken effect. Non-shareable is enough
	 * because the TLB is local to the CPU.
	 */
	dsb(nsh);
}

/**
 * Invalidates the TLB if a different vCPU is being run than the last vCPU of
 * the same VM which was run on the current pCPU.
 *
 * This is necessary because VMs may (contrary to the architecture
 * specification) use inconsistent ASIDs across vCPUs. c.f. KVM's similar
 * workaround:
 * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94d0e5980d6791b9
 */
void maybe_invalidate_tlb(struct vcpu *vcpu)
{
	size_t current_cpu_index = cpu_index(vcpu->cpu);
	ffa_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);

	if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
	    new_vcpu_index) {
		/*
		 * The vCPU has changed since the last time this VM was run on
		 * this pCPU, so we need to invalidate the TLB.
		 */
		invalidate_vm_tlb();

		/* Record the fact that this vCPU is now running on this CPU. */
		vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] =
			new_vcpu_index;
	}
}

noreturn void irq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("IRQ from current exception level.");
}

noreturn void fiq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("FIQ from current exception level.");
}

noreturn void serr_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("SError from current exception level.");
}

/**
 * Returns true if ELR_EL2 is not to be restored from the stack. Currently
 * the function never returns false: in all other cases it panics.
 */
bool sync_current_exception(uintreg_t elr, uintreg_t spsr)
{
	uintreg_t esr = read_msr(esr_el2);
	uintreg_t ec = GET_ESR_EC(esr);
	(void)spsr;

	switch (ec) {
	case EC_DATA_ABORT_SAME_EL: {
		uint64_t iss = GET_ESR_ISS(esr);
		uint64_t dfsc = GET_ESR_ISS_DFSC(iss);
		uint64_t far = read_msr(far_el2);

		/* Handle Granule Protection Fault. */
		if (is_arch_feat_rme_supported() && dfsc == DFSC_GPF) {
			dlog_verbose(
				"Granule Protection Fault: esr=%#lx, ec=%#lx, "
				"far=%#lx, elr=%#lx\n",
				esr, ec, far, elr);

			/*
			 * Change ELR_EL2 only if the fault was taken while
			 * either reading or writing within 'memcpy_trapped'.
			 */
			if (elr == (uintptr_t)memcpy_trapped_read ||
			    elr == (uintptr_t)memcpy_trapped_write) {
				dlog_verbose(
					"GPF due to data abort on %s.\n",
					(elr == (uintptr_t)memcpy_trapped_read)
						? "read"
						: "write");

				/*
				 * Update ELR_EL2 with the return address, so
				 * that the call to 'memcpy_trapped' returns
				 * an error.
				 */
				write_msr(ELR_EL2, memcpy_trapped_aborted);
				return true;
			}
		}

#if ENABLE_MTE
		if (dfsc == DFSC_SYNC_TAG_CHECK_FAULT) {
			dlog_error(
				"Data abort due to synchronous tag check "
				"fault: pc=%#lx, esr=%#lx, ec=%#lx, "
				"far=%#lx, dfsc=%#lx\n",
				elr, esr, ec, far, dfsc);
		}
		break;
#endif
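		/*
		 * Note: when MTE is enabled, the unconditional break above
		 * makes the generic data-abort logging below unreachable.
		 */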
		if (!GET_ESR_FNV(esr)) {
			dlog_error(
				"Data abort: pc=%#lx, esr=%#lx, ec=%#lx, "
				"far=%#lx\n",
				elr, esr, ec, far);
		} else {
			dlog_error(
				"Data abort: pc=%#lx, esr=%#lx, ec=%#lx, "
				"far=invalid\n",
				elr, esr, ec);
		}
	} break;
	default:
		dlog_error(
			"Unknown current sync exception pc=%#lx, esr=%#lx, "
			"ec=%#lx\n",
			elr, esr, ec);
		break;
	}

	panic("EL2 exception");
}

/**
 * Sets or clears the VF bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_fiq(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->hyp_state.hcr_el2 |= HCR_EL2_VF;
	} else {
		r->hyp_state.hcr_el2 &= ~HCR_EL2_VF;
	}
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_irq(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->hyp_state.hcr_el2 |= HCR_EL2_VI;
	} else {
		r->hyp_state.hcr_el2 &= ~HCR_EL2_VI;
	}
}

#if SECURE_WORLD == 1
/**
 * Handles special direct messages from the SPMD to the SPMC.
 */
static bool spmd_handler(struct ffa_value *args, struct vcpu *current)
{
	ffa_id_t sender = ffa_sender(*args);
	ffa_id_t receiver = ffa_receiver(*args);
	ffa_id_t current_vm_id = current->vm->id;
	enum ffa_spmd_framework_msg_func func =
		(enum ffa_spmd_framework_msg_func)ffa_framework_msg_func(*args);

	/*
	 * Check that the direct message request originates from the SPMD, is
	 * directed to the SPMC, and is a framework message.
	 */
	if (!(sender == HF_SPMD_VM_ID && receiver == HF_SPMC_VM_ID &&
	      current_vm_id == HF_OTHER_WORLD_ID &&
	      ffa_is_framework_msg(*args))) {
		return false;
	}

	/*
	 * The framework message is conveyed by EL3/SPMD to the SPMC, so the
	 * current VM id must match the other world VM id.
	 */
	CHECK(current->vm->id == HF_HYPERVISOR_VM_ID);

	switch (func) {
	case SPMD_FRAMEWORK_MSG_PSCI_REQ: {
		enum psci_return_code psci_msg_response =
			PSCI_ERROR_NOT_SUPPORTED;
		struct vcpu *boot_vcpu = vcpu_get_boot_vcpu();
		struct vm *vm = boot_vcpu->vm;
		struct vcpu_locked vcpu_locked;

		/*
		 * TODO: the power management event reached the SPMC.
		 * In a later iteration, the power management event can
		 * be passed to the SP by resuming it.
		 */
		switch (args->arg3) {
		case PSCI_CPU_OFF: {
			if (vm_power_management_cpu_off_requested(vm)) {
				struct vcpu *vcpu;

				/* Allow only S-EL1 MP SPs to reach here. */
				CHECK(!vm->el0_partition);
				CHECK(vm->vcpu_count > 1);

				vcpu = vm_get_vcpu(vm, vcpu_index(current));
				vcpu_locked = vcpu_lock(vcpu);
				vcpu->state = VCPU_STATE_OFF;
				vcpu_unlock(&vcpu_locked);
				cpu_off(vcpu->cpu);
				dlog_verbose("cpu%u off notification!\n",
					     vcpu_index(vcpu));
			}

			psci_msg_response = PSCI_RETURN_SUCCESS;
			break;
		}
		default:
			dlog_error(
				"FF-A PSCI framework message not handled "
				"%#lx %#lx %#lx %#lx\n",
				args->func, args->arg1, args->arg2, args->arg3);
			psci_msg_response = PSCI_ERROR_NOT_SUPPORTED;
		}

		*args = ffa_framework_msg_resp(HF_SPMC_VM_ID, HF_SPMD_VM_ID,
					       SPMD_FRAMEWORK_MSG_PSCI_RESP,
					       psci_msg_response);
		return true;
	}
	case SPMD_FRAMEWORK_MSG_FFA_VERSION_REQ: {
		struct ffa_value ret = api_ffa_version(current, args->arg3);
		*args = ffa_framework_msg_resp(
			HF_SPMC_VM_ID, HF_SPMD_VM_ID,
			SPMD_FRAMEWORK_MSG_FFA_VERSION_RESP, ret.func);
		return true;
	}
	default:
		dlog_error("FF-A framework message not handled %#lx\n",
			   args->arg2);

		/*
		 * TODO: the framework message that was conveyed by a direct
		 * request is not handled, although we still want to complete
		 * with a direct response. However, there is no defined error
		 * response to state that the message couldn't be handled.
		 * An alternative would be to return FFA_ERROR.
		 */
		*args = ffa_framework_msg_resp(HF_SPMC_VM_ID, HF_SPMD_VM_ID,
					       func, 0);
		return true;
	}
}

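/**
 * Called when the SPMC hands execution back to the normal world: if any vCPU
 * on this CPU has a pending timer deadline, program the host timer to track
 * the nearest one.
 */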
void spmc_exit_to_nwd(struct vcpu *owd_vcpu)
{
	struct vcpu *deadline_vcpu =
		timer_find_vcpu_nearest_deadline(owd_vcpu->cpu);

	/*
	 * The SPMC tracks a vCPU's timer deadline through its host timer so
	 * that it can bring execution back from the normal world to signal
	 * the virtual timer interrupt to the SP's vCPU.
	 */
	if (deadline_vcpu != NULL) {
		host_timer_track_deadline(&deadline_vcpu->regs.arch_timer);
	}
}
#endif

/**
 * Checks whether to block an SMC being forwarded from a VM.
 */
static bool smc_is_blocked(const struct vm *vm, uint32_t func)
{
	bool block_by_default = !vm->smc_whitelist.permissive;

	for (size_t i = 0; i < vm->smc_whitelist.smc_count; ++i) {
		if (func == vm->smc_whitelist.smcs[i]) {
			return false;
		}
	}

	dlog_notice("SMC %#010x attempted from VM %#x, blocked=%u\n", func,
		    vm->id, block_by_default);

	/* Access is still allowed in permissive mode. */
	return block_by_default;
}

/**
 * Applies SMC access control according to the manifest and forwards the call
 * if access is granted.
 */
static void smc_forwarder(const struct vm *vm, struct ffa_value *args)
{
	struct ffa_value ret;
	uint32_t client_id = vm->id;
	uintreg_t arg7 = args->arg7;

	if (smc_is_blocked(vm, args->func)) {
		args->func = SMCCC_ERROR_UNKNOWN;
		return;
	}

	/*
	 * Set the Client ID but keep the existing Secure OS ID and anything
	 * else (currently unspecified) that the client may have passed in the
	 * upper bits.
	 */
	args->arg7 = client_id | (arg7 & ~CLIENT_ID_MASK);
	ret = smc_forward(args->func, args->arg1, args->arg2, args->arg3,
			  args->arg4, args->arg5, args->arg6, args->arg7);

	/*
	 * Preserve the value passed by the caller, rather than the generated
	 * client_id. Note that this would also overwrite any return value
	 * that may be in x7, but the SMCs that we are forwarding are legacy
	 * calls from before SMCCC 1.2 so won't have more than 4 return values
	 * anyway.
	 */
	ret.arg7 = arg7;

	plat_smc_post_forward(*args, &ret);

	*args = ret;
}

/**
 * In the normal world, ffa_handler is always called from the virtual FF-A
 * instance (from a VM in EL1). In the secure world, ffa_handler may be called
 * from the virtual (a secure partition in S-EL1) or physical FF-A instance
 * (from the normal world via EL3). The function returns true when the call is
 * handled. The *next pointer is updated to the next vCPU to run, which might
 * be the 'other world' vCPU if the call originated from the virtual FF-A
 * instance and has to be forwarded down to EL3, or left as is to resume the
 * current vCPU.
 */
static bool ffa_handler(struct ffa_value *args, struct vcpu *current,
			struct vcpu **next)
{
	uint32_t func = args->func;

	/*
	 * NOTE: When adding new methods to this handler update
	 * api_ffa_features accordingly.
	 */
	switch (func) {
	case FFA_VERSION_32:
		*args = api_ffa_version(current, args->arg1);
		return true;
	case FFA_PARTITION_INFO_GET_32: {
		struct ffa_uuid uuid;

		ffa_uuid_init(args->arg1, args->arg2, args->arg3, args->arg4,
			      &uuid);
		*args = api_ffa_partition_info_get(current, &uuid, args->arg5);
		return true;
	}
	case FFA_PARTITION_INFO_GET_REGS_64: {
		struct ffa_uuid uuid;
		uint16_t start_index;
		uint16_t tag;

		ffa_uuid_from_u64x2(args->arg1, args->arg2, &uuid);
		start_index = args->arg3 & 0xFFFF;
		tag = (args->arg3 >> 16) & 0xFFFF;
		*args = api_ffa_partition_info_get_regs(current, &uuid,
							start_index, tag);
		return true;
	}
	case FFA_ID_GET_32:
		*args = api_ffa_id_get(current);
		return true;
	case FFA_SPM_ID_GET_32:
		*args = api_ffa_spm_id_get();
		return true;
	case FFA_FEATURES_32:
		*args = api_ffa_features(args->arg1, args->arg2, current);
		return true;
	case FFA_RX_RELEASE_32:
		*args = api_ffa_rx_release(ffa_receiver(*args), current);
		return true;
	case FFA_RXTX_MAP_64:
		*args = api_ffa_rxtx_map(ipa_init(args->arg1),
					 ipa_init(args->arg2), args->arg3,
					 current);
		return true;
	case FFA_RXTX_UNMAP_32:
		*args = api_ffa_rxtx_unmap(ffa_vm_id(*args), current);
		return true;
	case FFA_RX_ACQUIRE_32:
		*args = api_ffa_rx_acquire(ffa_receiver(*args), current);
		return true;
	case FFA_YIELD_32:
		*args = api_yield(current, next, args);
		return true;
	case FFA_MSG_SEND_32:
		*args = plat_ffa_msg_send(
			ffa_sender(*args), ffa_receiver(*args),
			ffa_msg_send_size(*args), current, next);
		return true;
	case FFA_MSG_SEND2_32:
		*args = api_ffa_msg_send2(ffa_sender(*args),
					  ffa_msg_send2_flags(*args), current);
		return true;
	case FFA_MSG_WAIT_32:
		*args = api_ffa_msg_wait(current, next, args);
		return true;
#if SECURE_WORLD == 0
	case FFA_MSG_POLL_32: {
		struct vcpu_locked current_locked;

		current_locked = vcpu_lock(current);
		*args = plat_ffa_msg_recv(false, current_locked, next);
		vcpu_unlock(&current_locked);
		return true;
	}
#endif
	case FFA_RUN_32:
		/*
		 * Ensure that an FF-A v1.2 endpoint preserves the
		 * runtime state of the calling partition by setting
		 * the extended registers (x8-x17) to zero.
		 */
		if (current->vm->ffa_version >= FFA_VERSION_1_2 &&
		    !api_extended_args_are_zero(args)) {
			*args = ffa_error(FFA_INVALID_PARAMETERS);
			return false;
		}
		*args = api_ffa_run(ffa_vm_id(*args), ffa_vcpu_index(*args),
				    current, next);
		return true;
	case FFA_MEM_DONATE_32:
	case FFA_MEM_DONATE_64:
	case FFA_MEM_LEND_32:
	case FFA_MEM_LEND_64:
	case FFA_MEM_SHARE_32:
	case FFA_MEM_SHARE_64:
		*args = api_ffa_mem_send(func, args->arg1, args->arg2,
					 ipa_init(args->arg3), args->arg4,
					 current);
		return true;
	case FFA_MEM_RETRIEVE_REQ_64:
	case FFA_MEM_RETRIEVE_REQ_32:
		*args = api_ffa_mem_retrieve_req(args->arg1, args->arg2,
						 ipa_init(args->arg3),
						 args->arg4, current);
		return true;
	case FFA_MEM_RELINQUISH_32:
		*args = api_ffa_mem_relinquish(current);
		return true;
	case FFA_MEM_RECLAIM_32:
		*args = api_ffa_mem_reclaim(
			ffa_assemble_handle(args->arg1, args->arg2), args->arg3,
			current);
		return true;
	case FFA_MEM_FRAG_RX_32:
		*args = api_ffa_mem_frag_rx(ffa_frag_handle(*args), args->arg3,
					    (args->arg4 >> 16) & 0xffff,
					    current);
		return true;
	case FFA_MEM_FRAG_TX_32:
		*args = api_ffa_mem_frag_tx(ffa_frag_handle(*args), args->arg3,
					    (args->arg4 >> 16) & 0xffff,
					    current);
		return true;
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32:
#if SECURE_WORLD == 1
		if (spmd_handler(args, current)) {
			return true;
		}
#endif
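		/*
		 * Fall through: direct message requests not consumed by
		 * spmd_handler share the handling path of
		 * FFA_MSG_SEND_DIRECT_REQ2_64 below.
		 */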
	case FFA_MSG_SEND_DIRECT_REQ2_64:
		*args = api_ffa_msg_send_direct_req(*args, current, next);
		return true;
	case FFA_MSG_SEND_DIRECT_RESP_64:
	case FFA_MSG_SEND_DIRECT_RESP_32:
	case FFA_MSG_SEND_DIRECT_RESP2_64:
		*args = api_ffa_msg_send_direct_resp(*args, current, next);
		return true;
	case FFA_SECONDARY_EP_REGISTER_64:
		/*
		 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1.1
		 * The callee must return NOT_SUPPORTED if this function is
		 * invoked by a caller that implements version v1.0 of
		 * the Framework.
		 */
		*args = api_ffa_secondary_ep_register(ipa_init(args->arg1),
						      current);
		return true;
	case FFA_NOTIFICATION_BITMAP_CREATE_32:
		*args = api_ffa_notification_bitmap_create(
			(ffa_id_t)args->arg1, (ffa_vcpu_count_t)args->arg2,
			current);
		return true;
	case FFA_NOTIFICATION_BITMAP_DESTROY_32:
		*args = api_ffa_notification_bitmap_destroy(
			(ffa_id_t)args->arg1, current);
		return true;
	case FFA_NOTIFICATION_BIND_32:
		*args = api_ffa_notification_update_bindings(
			ffa_sender(*args), ffa_receiver(*args), args->arg2,
			ffa_notifications_bitmap(args->arg3, args->arg4), true,
			current);
		return true;
	case FFA_NOTIFICATION_UNBIND_32:
		*args = api_ffa_notification_update_bindings(
			ffa_sender(*args), ffa_receiver(*args), 0,
			ffa_notifications_bitmap(args->arg3, args->arg4), false,
			current);
		return true;
	case FFA_MEM_PERM_SET_32:
	case FFA_MEM_PERM_SET_64:
		*args = api_ffa_mem_perm_set(va_init(args->arg1), args->arg2,
					     args->arg3, current);
		return true;
	case FFA_MEM_PERM_GET_32:
	case FFA_MEM_PERM_GET_64:
		*args = api_ffa_mem_perm_get(va_init(args->arg1), current);
		return true;
	case FFA_NOTIFICATION_SET_32:
		*args = api_ffa_notification_set(
			ffa_sender(*args), ffa_receiver(*args), args->arg2,
			ffa_notifications_bitmap(args->arg3, args->arg4),
			current);
		return true;
	case FFA_NOTIFICATION_GET_32:
		*args = api_ffa_notification_get(
			ffa_receiver(*args), ffa_notifications_get_vcpu(*args),
			args->arg2, current);
		return true;
	case FFA_NOTIFICATION_INFO_GET_64:
		*args = api_ffa_notification_info_get(current);
		return true;
	case FFA_INTERRUPT_32:
		/*
		 * A malicious SP could invoke an HVC/SMC call with
		 * FFA_INTERRUPT_32 as the function argument. Return an error
		 * to avoid DoS.
		 */
		if (current->vm->id != HF_OTHER_WORLD_ID) {
			*args = ffa_error(FFA_DENIED);
			return true;
		}

		plat_ffa_handle_secure_interrupt(current, next);

		/*
		 * If the next vCPU belongs to an SP, the next time the NWd
		 * gets resumed these values will be overwritten by the ABI
		 * that is used to hand execution back to the NWd.
		 * If the NWd is to be resumed from here, then it will
		 * receive the FFA_NORMAL_WORLD_RESUME ABI, which signals
		 * that an interrupt has occurred, though it wasn't handled.
		 * This happens when the target vCPU was in the preempted
		 * state and the SP couldn't be resumed to handle the
		 * interrupt.
		 */
		*args = (struct ffa_value){.func = FFA_NORMAL_WORLD_RESUME};
		return true;
	case FFA_CONSOLE_LOG_32:
	case FFA_CONSOLE_LOG_64:
		*args = api_ffa_console_log(*args, current);
		return true;
	case FFA_ERROR_32:
		*args = plat_ffa_error_32(current, next, args->arg2);
		return true;

	default:
		return false;
	}
}

/**
 * Sets or clears the VI/VF bits according to pending interrupts.
 * If `vcpu` is NULL, the function will set it to the currently running
 * vCPU.
 */
static void vcpu_update_virtual_interrupts(struct vcpu *vcpu)
{
	struct vcpu_locked vcpu_locked;

	if (vcpu == NULL) {
		vcpu = current();
	}

	/* Only update those at the virtual instance. */
	if (vcpu->vm->el0_partition || !vm_id_is_current_world(vcpu->vm->id)) {
		return;
	}

	vcpu_locked = vcpu_lock(vcpu);
	set_virtual_irq(&vcpu->regs,
			vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
	set_virtual_fiq(&vcpu->regs,
			vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
	vcpu_unlock(&vcpu_locked);
}

/**
 * Handles PSCI and FF-A calls and writes the return value back to the
 * registers of the vCPU. This is shared between smc_handler and hvc_handler.
 *
 * Returns true if the call was handled.
 */
static bool hvc_smc_handler(struct ffa_value args, struct vcpu *vcpu,
			    struct vcpu **next)
{
	const uint32_t func = args.func;

	/* Do not expect PSCI calls emitted from within the secure world. */
#if SECURE_WORLD == 0
	if (psci_handler(vcpu, func, args.arg1, args.arg2, args.arg3,
			 &vcpu->regs.r[0], next)) {
		return true;
	}
#endif

	if (ffa_handler(&args, vcpu, next)) {
#if SECURE_WORLD == 1
		/*
		 * If giving back execution to the NWd, check if the Schedule
		 * Receiver Interrupt has been delayed, and trigger it on the
		 * current core if so.
		 */
		if ((*next != NULL && (*next)->vm->id == HF_OTHER_WORLD_ID) ||
		    (*next == NULL && vcpu->vm->id == HF_OTHER_WORLD_ID)) {
			plat_ffa_sri_trigger_if_delayed(vcpu->cpu);
		}
#endif
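		/*
		 * Any FF-A call other than FFA_VERSION completes version
		 * negotiation, so record the negotiated version as final.
		 */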
		if (func != FFA_VERSION_32) {
			struct vm_locked vm_locked = vm_lock(vcpu->vm);

			vm_locked.vm->ffa_version_negotiated = true;
			vm_unlock(&vm_locked);
		}

		arch_regs_set_retval(&vcpu->regs, args);

		/*
		 * In case the pending virtual interrupt state changed while
		 * handling the last FF-A call, update the next vCPU's
		 * registers directly.
		 */
		vcpu_update_virtual_interrupts(*next);
		return true;
	}

	return false;
}

/**
 * Processes SMC instruction calls.
 */
static struct vcpu *smc_handler(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	/* Mask out the SMCCC SVE hint bit from the function id. */
	args.func &= ~SMCCC_SVE_HINT_MASK;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	smc_forwarder(vcpu->vm, &args);
	arch_regs_set_retval(&vcpu->regs, args);
	return NULL;
}

#if SECURE_WORLD == 1

/**
 * Called from other_world_loop on return from SMC.
 * Processes SMC calls originating from the NWd.
 */
struct vcpu *smc_handler_from_nwd(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	plat_save_ns_simd_context(vcpu);

	/* Mask out the SMCCC SVE hint bit from the function id. */
	args.func &= ~SMCCC_SVE_HINT_MASK;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	/*
	 * If the SMC emitted by the normal world is not handled in the secure
	 * world then return an error stating such an ABI is not supported.
	 * Only FF-A calls are supported. We cannot return SMCCC_ERROR_UNKNOWN
	 * directly because the SPMD SMC handler would not recognize it as a
	 * standard FF-A call returning from the SPMC.
	 */
	arch_regs_set_retval(&vcpu->regs, ffa_error(FFA_NOT_SUPPORTED));

	return NULL;
}

#endif

Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 862 | /* |
| 863 | * Exception vector offsets. |
| 864 | * See Arm Architecture Reference Manual Armv8-A, D1.10.2. |
| 865 | */ |
| 866 | |
| 867 | /** |
| 868 | * Offset for synchronous exceptions at current EL with SPx. |
| 869 | */ |
| 870 | #define OFFSET_CURRENT_SPX UINT64_C(0x200) |
| 871 | |
| 872 | /** |
| 873 | * Offset for synchronous exceptions at lower EL using AArch64. |
| 874 | */ |
| 875 | #define OFFSET_LOWER_EL_64 UINT64_C(0x400) |
| 876 | |
| 877 | /** |
| 878 | * Offset for synchronous exceptions at lower EL using AArch32. |
| 879 | */ |
| 880 | #define OFFSET_LOWER_EL_32 UINT64_C(0x600) |
| 881 | |
| 882 | /** |
| 883 | * Returns the address for the exception handler at EL1. |
| 884 | */ |
| 885 | static uintreg_t get_el1_exception_handler_addr(const struct vcpu *vcpu) |
| 886 | { |
Raghu Krishnamurthy | 32626c9 | 2021-01-17 09:57:29 -0800 | [diff] [blame] | 887 | uintreg_t base_addr = has_vhe_support() ? read_msr(MSR_VBAR_EL12) |
| 888 | : read_msr(vbar_el1); |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 889 | uintreg_t pe_mode = vcpu->regs.spsr & PSR_PE_MODE_MASK; |
| 890 | bool is_arch32 = vcpu->regs.spsr & PSR_ARCH_MODE_32; |
| 891 | |
| 892 | if (pe_mode == PSR_PE_MODE_EL0T) { |
| 893 | if (is_arch32) { |
| 894 | base_addr += OFFSET_LOWER_EL_32; |
| 895 | } else { |
| 896 | base_addr += OFFSET_LOWER_EL_64; |
| 897 | } |
| 898 | } else { |
| 899 | CHECK(!is_arch32); |
| 900 | base_addr += OFFSET_CURRENT_SPX; |
| 901 | } |
| 902 | |
| 903 | return base_addr; |
| 904 | } |
| 905 | |
| 906 | /** |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 907 | * Injects an exception with the specified Exception Syndrome Register value |
| 908 | * into the EL1. |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 909 | * |
| 910 | * NOTE: This function assumes that the lazy registers haven't been saved, and |
| 911 | * writes to the lazy registers of the CPU directly instead of the vCPU. |
| 912 | */ |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 913 | static void inject_el1_exception(struct vcpu *vcpu, uintreg_t esr_el1_value, |
| 914 | uintreg_t far_el1_value) |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 915 | { |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 916 | uintreg_t handler_address = get_el1_exception_handler_addr(vcpu); |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 917 | |
| 918 | /* Update the CPU state to inject the exception. */ |
Raghu Krishnamurthy | 32626c9 | 2021-01-17 09:57:29 -0800 | [diff] [blame] | 919 | if (has_vhe_support()) { |
| 920 | write_msr(MSR_ESR_EL12, esr_el1_value); |
| 921 | write_msr(MSR_FAR_EL12, far_el1_value); |
| 922 | write_msr(MSR_ELR_EL12, vcpu->regs.pc); |
| 923 | write_msr(MSR_SPSR_EL12, vcpu->regs.spsr); |
| 924 | } else { |
| 925 | write_msr(esr_el1, esr_el1_value); |
| 926 | write_msr(far_el1, far_el1_value); |
| 927 | write_msr(elr_el1, vcpu->regs.pc); |
| 928 | write_msr(spsr_el1, vcpu->regs.spsr); |
| 929 | } |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 930 | |
| 931 | /* |
| 932 | * Mask (disable) interrupts and run in EL1h mode. |
| 933 | * EL1h mode is used because, by default, taking an exception selects the |
| 934 | * stack pointer for the target exception level. The software can change |
| 935 | * that later in the handler if needed. |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 936 | */ |
| 937 | vcpu->regs.spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H; |
| 938 | |
| 939 | /* Transfer control to the exception handler. */ |
| 940 | vcpu->regs.pc = handler_address; |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 941 | } |
| 942 | |
| 943 | /** |
| 944 | * Injects a Data Abort exception (same exception level). |
| 945 | */ |
| 946 | static void inject_el1_data_abort_exception(struct vcpu *vcpu, |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 947 | uintreg_t esr_el2, |
| 948 | uintreg_t far_el2) |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 949 | { |
| 950 | /* |
| 951 | * ISS encoding remains the same, but the EC is changed to reflect |
| 952 | * where the exception came from. |
| 953 | * See Arm Architecture Reference Manual Armv8-A, pages D13-2943/2982. |
| 954 | */ |
| 955 | uintreg_t esr_el1_value = GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) | |
| 956 | (EC_DATA_ABORT_SAME_EL << ESR_EC_OFFSET); |
| 957 | |
Olivier Deprez | f92e5d4 | 2020-11-13 16:00:54 +0100 | [diff] [blame] | 958 | dlog_notice("Injecting Data Abort exception into VM %#x.\n", |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 959 | vcpu->vm->id); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 960 | |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 961 | inject_el1_exception(vcpu, esr_el1_value, far_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 962 | } |
| 963 | |
| 964 | /** |
| 965 | * Injects an Instruction Abort exception (same exception level). |
| 966 | */ |
| 967 | static void inject_el1_instruction_abort_exception(struct vcpu *vcpu, |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 968 | uintreg_t esr_el2, |
| 969 | uintreg_t far_el2) |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 970 | { |
| 971 | /* |
| 972 | * ISS encoding remains the same, but the EC is changed to reflect |
| 973 | * where the exception came from. |
| 974 | * See Arm Architecture Reference Manual Armv8-A, pages D13-2941/2980. |
| 975 | */ |
| 976 | uintreg_t esr_el1_value = |
| 977 | GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) | |
| 978 | (EC_INSTRUCTION_ABORT_SAME_EL << ESR_EC_OFFSET); |
| 979 | |
Olivier Deprez | f92e5d4 | 2020-11-13 16:00:54 +0100 | [diff] [blame] | 980 | dlog_notice("Injecting Instruction Abort exception into VM %#x.\n", |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 981 | vcpu->vm->id); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 982 | |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 983 | inject_el1_exception(vcpu, esr_el1_value, far_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 984 | } |
| 985 | |
| 986 | /** |
| 987 | * Injects an exception with an unknown reason into the EL1. |
| 988 | */ |
| 989 | static void inject_el1_unknown_exception(struct vcpu *vcpu, uintreg_t esr_el2) |
| 990 | { |
| 991 | uintreg_t esr_el1_value = |
| 992 | GET_ESR_IL(esr_el2) | (EC_UNKNOWN << ESR_EC_OFFSET); |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 993 | |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 994 | dlog_notice("Injecting Unknown Reason exception into VM %#x.\n", |
| 995 | vcpu->vm->id); |
| 996 | |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 997 | /* |
| 998 | * The value of the far_el2 register is UNKNOWN in this case, |
| 999 | * therefore, don't propagate it to avoid leaking sensitive information. |
| 1000 | */ |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1001 | inject_el1_exception(vcpu, esr_el1_value, 0); |
| 1002 | } |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 1003 | |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1004 | /** |
| 1005 | * Injects an exception because of a system register trap. |
| 1006 | */ |
| 1007 | static void inject_el1_sysreg_trap_exception(struct vcpu *vcpu, |
| 1008 | uintreg_t esr_el2) |
| 1009 | { |
| 1010 | char *direction_str = ISS_IS_READ(esr_el2) ? "read" : "write"; |
| 1011 | |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1012 | dlog_notice( |
Karl Meakin | e8937d9 | 2024-03-19 16:04:25 +0000 | [diff] [blame] | 1013 | "Trapped access to system register %s: op0=%lu, op1=%lu, " |
| 1014 | "crn=%lu, " |
| 1015 | "crm=%lu, op2=%lu, rt=%lu.\n", |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1016 | direction_str, GET_ISS_OP0(esr_el2), GET_ISS_OP1(esr_el2), |
| 1017 | GET_ISS_CRN(esr_el2), GET_ISS_CRM(esr_el2), |
| 1018 | GET_ISS_OP2(esr_el2), GET_ISS_RT(esr_el2)); |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 1019 | |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1020 | inject_el1_unknown_exception(vcpu, esr_el2); |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 1021 | } |
| 1022 | |
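| | /** |
| | * Processes HVC instruction calls. Also used for SVC calls issued by |
| | * EL0 partitions (see sync_lower_exception()). |
| | */ |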
Andrew Walbran | d8d3f5d | 2020-10-07 18:23:01 +0100 | [diff] [blame] | 1023 | static struct vcpu *hvc_handler(struct vcpu *vcpu) |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1024 | { |
Andrew Walbran | d8d3f5d | 2020-10-07 18:23:01 +0100 | [diff] [blame] | 1025 | struct ffa_value args = arch_regs_get_args(&vcpu->regs); |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1026 | struct vcpu *next = NULL; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1027 | |
Olivier Deprez | 79dbd6f | 2023-11-29 16:12:36 +0100 | [diff] [blame] | 1028 | /* Mask out SMCCC SVE hint bit from function id. */ |
| 1029 | args.func &= ~SMCCC_SVE_HINT_MASK; |
| 1030 | |
Andrew Walbran | d8d3f5d | 2020-10-07 18:23:01 +0100 | [diff] [blame] | 1031 | if (hvc_smc_handler(args, vcpu, &next)) { |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1032 | return next; |
Andrew Walbran | 7d28d9a | 2019-08-30 16:24:58 +0100 | [diff] [blame] | 1033 | } |
Jose Marinho | fc0b2b6 | 2019-06-06 11:18:45 +0100 | [diff] [blame] | 1034 | |
Andrew Walbran | 7f920af | 2019-09-03 17:09:30 +0100 | [diff] [blame] | 1035 | switch (args.func) { |
J-Alves | 15e3026 | 2024-10-14 11:56:07 +0100 | [diff] [blame] | 1036 | #if SECURE_WORLD == 1 |
Madhukar Pappireddy | f675bb6 | 2021-08-03 12:57:10 -0500 | [diff] [blame] | 1037 | case HF_INTERRUPT_DEACTIVATE: |
| 1038 | vcpu->regs.r[0] = plat_ffa_interrupt_deactivate( |
| 1039 | args.arg1, args.arg2, vcpu); |
| 1040 | break; |
Madhukar Pappireddy | 72d2393 | 2023-07-24 15:57:28 -0500 | [diff] [blame] | 1041 | |
| 1042 | case HF_INTERRUPT_RECONFIGURE: |
| 1043 | vcpu->regs.r[0] = plat_ffa_interrupt_reconfigure( |
| 1044 | args.arg1, args.arg2, args.arg3, vcpu); |
| 1045 | break; |
Daniel Boulby | f3cf28c | 2024-08-22 10:46:23 +0100 | [diff] [blame] | 1046 | |
| 1047 | case HF_INTERRUPT_SEND_IPI: |
| 1048 | vcpu->regs.r[0] = api_hf_interrupt_send_ipi(args.arg1, vcpu); |
| 1049 | break; |
Madhukar Pappireddy | f675bb6 | 2021-08-03 12:57:10 -0500 | [diff] [blame] | 1050 | #endif |
Olivier Deprez | 109c6d4 | 2023-11-29 14:58:47 +0100 | [diff] [blame] | 1051 | case HF_INTERRUPT_ENABLE: |
| 1052 | vcpu->regs.r[0] = api_interrupt_enable(args.arg1, args.arg2, |
| 1053 | args.arg3, vcpu); |
| 1054 | break; |
| 1055 | |
Madhukar Pappireddy | c64d064 | 2024-08-07 16:55:46 -0500 | [diff] [blame] | 1056 | case HF_INTERRUPT_GET: { |
| 1057 | struct vcpu_locked current_locked; |
Madhukar Pappireddy | f675bb6 | 2021-08-03 12:57:10 -0500 | [diff] [blame] | 1058 | |
Madhukar Pappireddy | c64d064 | 2024-08-07 16:55:46 -0500 | [diff] [blame] | 1059 | current_locked = vcpu_lock(vcpu); |
| 1060 | vcpu->regs.r[0] = plat_ffa_interrupt_get(current_locked); |
| 1061 | vcpu_unlock(¤t_locked); |
| 1062 | break; |
| 1063 | } |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1064 | default: |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1065 | vcpu->regs.r[0] = SMCCC_ERROR_UNKNOWN; |
J-Alves | 3317240 | 2024-08-15 13:15:34 +0100 | [diff] [blame] | 1066 | dlog_verbose("Unsupported function %#lx\n", args.func); |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1067 | } |
| 1068 | |
J-Alves | 6f6bf8a | 2024-07-25 15:17:57 +0100 | [diff] [blame] | 1069 | /* |
| 1070 | * In case the virtual interrupt state has been updated after handling |
| 1071 | * the last hypervisor call, update the next vCPU directly in the register. |
| 1072 | */ |
Manish Pandey | 35e452f | 2021-02-18 21:36:34 +0000 | [diff] [blame] | 1073 | vcpu_update_virtual_interrupts(next); |
Andrew Walbran | 3d84a26 | 2018-12-13 14:41:19 +0000 | [diff] [blame] | 1074 | |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1075 | return next; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1076 | } |
| 1077 | |
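| | /** |
| | * Handles IRQs from a lower EL. In the secure world the interrupt is |
| | * passed to the platform's secure interrupt handling; otherwise control |
| | * switches back to the primary VM, where interrupts are handled. |
| | */ |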
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 1078 | struct vcpu *irq_lower(void) |
| 1079 | { |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 1080 | #if SECURE_WORLD == 1 |
| 1081 | struct vcpu *next = NULL; |
| 1082 | |
J-Alves | 03edf40 | 2023-07-21 15:13:49 +0100 | [diff] [blame] | 1083 | plat_ffa_handle_secure_interrupt(current(), &next); |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 1084 | |
| 1085 | /* |
| 1086 | * Since we are in interrupt context, set the bit for the |
| 1087 | * next vCPU directly in the register. |
| 1088 | */ |
| 1089 | vcpu_update_virtual_interrupts(next); |
| 1090 | |
| 1091 | return next; |
| 1092 | #else |
Andrew Scull | 9726c25 | 2019-01-23 13:44:19 +0000 | [diff] [blame] | 1093 | /* |
| 1094 | * Switch back to primary VM, interrupts will be handled there. |
| 1095 | * |
| 1096 | * If the VM has aborted, this vCPU will be aborted when the scheduler |
| 1097 | * tries to run it again. This means the interrupt will not be delayed |
| 1098 | * by the aborted VM. |
| 1099 | * |
| 1100 | * TODO: Only switch when the interrupt isn't for the current VM. |
| 1101 | */ |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame] | 1102 | return api_preempt(current()); |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 1103 | #endif |
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 1104 | } |
| 1105 | |
Madhukar Pappireddy | 7fc585e | 2023-03-02 14:31:22 -0600 | [diff] [blame] | 1106 | #if SECURE_WORLD == 1 |
| 1107 | static void spmd_group0_intr_delegate(void) |
| 1108 | { |
| 1109 | struct ffa_value ret; |
| 1110 | |
| 1111 | dlog_verbose("Delegating Group0 interrupt to SPMD\n"); |
| 1112 | |
| 1113 | ret = smc_ffa_call((struct ffa_value){.func = FFA_EL3_INTR_HANDLE_32}); |
| 1114 | |
| 1115 | /* Check if the Group0 interrupt was handled successfully. */ |
| 1116 | CHECK(ret.func == FFA_SUCCESS_32); |
| 1117 | } |
| 1118 | #endif |
| 1119 | |
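| | /** |
| | * Handles FIQs from a lower EL. In the secure world a FIQ signals either |
| | * a pending Group0 interrupt, which is delegated to EL3, or a pending |
| | * non-secure interrupt; in the normal world it is treated like an IRQ. |
| | */ |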
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 1120 | struct vcpu *fiq_lower(void) |
| 1121 | { |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1122 | #if SECURE_WORLD == 1 |
| 1123 | struct vcpu_locked current_locked; |
| 1124 | struct vcpu *current_vcpu = current(); |
Daniel Boulby | 4dd3f53 | 2021-09-21 09:57:08 +0100 | [diff] [blame] | 1125 | int64_t ret; |
Madhukar Pappireddy | 77d3bcd | 2023-03-01 17:26:22 -0600 | [diff] [blame] | 1126 | uint32_t intid; |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1127 | |
Madhukar Pappireddy | 77d3bcd | 2023-03-01 17:26:22 -0600 | [diff] [blame] | 1128 | intid = get_highest_pending_g0_interrupt_id(); |
| 1129 | |
| 1130 | /* Check for the highest priority pending Group0 interrupt. */ |
| 1131 | if (intid != SPURIOUS_INTID_OTHER_WORLD) { |
Madhukar Pappireddy | 7fc585e | 2023-03-02 14:31:22 -0600 | [diff] [blame] | 1132 | /* Delegate handling of Group0 interrupt to EL3 firmware. */ |
| 1133 | spmd_group0_intr_delegate(); |
| 1134 | |
| 1135 | /* Resume current vCPU. */ |
| 1136 | return NULL; |
Madhukar Pappireddy | 77d3bcd | 2023-03-01 17:26:22 -0600 | [diff] [blame] | 1137 | } |
| 1138 | |
| 1139 | /* |
| 1140 | * A special interrupt indicating there is no pending interrupt |
| 1141 | * with sufficient priority for the current security state. This |
| 1142 | * means a non-secure interrupt is pending. |
| 1143 | */ |
Madhukar Pappireddy | c40f55f | 2022-06-22 11:00:41 -0500 | [diff] [blame] | 1144 | assert(current_vcpu->vm->ns_interrupts_action != NS_ACTION_QUEUED); |
| 1145 | |
Maksims Svecovs | 9ddf86a | 2021-05-06 17:17:21 +0100 | [diff] [blame] | 1146 | if (plat_ffa_vm_managed_exit_supported(current_vcpu->vm)) { |
Madhukar Pappireddy | dd6fdfb | 2021-12-14 12:30:36 -0600 | [diff] [blame] | 1147 | uint8_t pmr = plat_interrupts_get_priority_mask(); |
| 1148 | |
Madhukar Pappireddy | 025a451 | 2024-10-14 22:09:19 -0500 | [diff] [blame] | 1149 | /* |
| 1150 | * Mask non-secure interrupts from triggering again until the |
| 1151 | * vCPU completes the managed exit sequence. |
| 1152 | */ |
| 1153 | plat_interrupts_set_priority_mask(SWD_MASK_NS_INT); |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1154 | |
| 1155 | current_locked = vcpu_lock(current_vcpu); |
Madhukar Pappireddy | 025a451 | 2024-10-14 22:09:19 -0500 | [diff] [blame] | 1156 | current_vcpu->prev_interrupt_priority = pmr; |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1157 | ret = api_interrupt_inject_locked(current_locked, |
| 1158 | HF_MANAGED_EXIT_INTID, |
Madhukar Pappireddy | bd10e57 | 2023-03-06 16:39:49 -0600 | [diff] [blame] | 1159 | current_locked, NULL); |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1160 | if (ret != 0) { |
| 1161 | panic("Failed to inject managed exit interrupt\n"); |
| 1162 | } |
| 1163 | |
| 1164 | /* Entering managed exit sequence. */ |
| 1165 | current_vcpu->processing_managed_exit = true; |
| 1166 | |
| 1167 | vcpu_unlock(¤t_locked); |
| 1168 | |
| 1169 | /* |
| 1170 | * Since we are in interrupt context, set the bit for the |
| 1171 | * current vCPU directly in the register. |
| 1172 | */ |
| 1173 | vcpu_update_virtual_interrupts(NULL); |
| 1174 | |
| 1175 | /* Resume current vCPU. */ |
| 1176 | return NULL; |
| 1177 | } |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1178 | |
Madhukar Pappireddy | d46c06e | 2022-06-21 18:14:52 -0500 | [diff] [blame] | 1179 | /* |
| 1180 | * Unwind Normal World Scheduled Call chain in response to NS |
| 1181 | * Interrupt. |
| 1182 | */ |
| 1183 | return plat_ffa_unwind_nwd_call_chain_interrupt(current_vcpu); |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 1184 | #else |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 1185 | return irq_lower(); |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 1186 | #endif |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 1187 | } |
| 1188 | |
Fuad Tabba | d1d6798 | 2020-01-08 11:28:29 +0000 | [diff] [blame] | 1189 | noreturn struct vcpu *serr_lower(void) |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 1190 | { |
Fuad Tabba | d1d6798 | 2020-01-08 11:28:29 +0000 | [diff] [blame] | 1191 | /* |
| 1192 | * SError exceptions should be isolated and handled by the responsible |
| 1193 | * VM/exception level. Getting here indicates a bug (that isolation is |
| 1194 | * not working) or a processor that does not support ARMv8.2-IESB, in |
| 1195 | * which case Hafnium routes SError exceptions to EL2 (here). |
| 1196 | */ |
| 1197 | panic("SError from a lower exception level."); |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 1198 | } |
| 1199 | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1200 | /** |
| 1201 | * Initialises a fault info structure. It assumes that an FnV bit exists at |
| 1202 | * bit offset 10 of the ESR, and that it is only valid when the bottom 6 bits of |
| 1203 | * the ESR (the fault status code) are 010000; this is the case for both |
| 1204 | * instruction and data aborts, but not necessarily for other exception reasons. |
| 1205 | */ |
| 1206 | static struct vcpu_fault_info fault_info_init(uintreg_t esr, |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 1207 | const struct vcpu *vcpu, |
| 1208 | uint32_t mode) |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1209 | { |
| 1210 | uint32_t fsc = esr & 0x3f; |
| 1211 | struct vcpu_fault_info r; |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 1212 | uint64_t hpfar_el2_val; |
| 1213 | uint64_t hpfar_el2_fipa; |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1214 | |
| 1215 | r.mode = mode; |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1216 | r.pc = va_init(vcpu->regs.pc); |
| 1217 | |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 1218 | /* Get Hypervisor IPA Fault Address value. */ |
| 1219 | hpfar_el2_val = read_msr(hpfar_el2); |
| 1220 | |
| 1221 | /* Extract Faulting IPA. */ |
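| | /* The FIPA field holds bits [51:12] of the IPA at bits [43:4], hence the shift by 8. */ |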
| 1222 | hpfar_el2_fipa = (hpfar_el2_val & HPFAR_EL2_FIPA) << 8; |
| 1223 | |
| 1224 | #if SECURE_WORLD == 1 |
| 1225 | |
| 1226 | /* |
| 1227 | * Determine if faulting IPA targets NS space. |
| 1228 | * At NS-EL2 hpfar_el2 bit 63 is RES0. At S-EL2, this bit determines if |
| 1229 | * the faulting Stage-1 address output is a secure or non-secure IPA. |
| 1230 | */ |
| 1231 | if ((hpfar_el2_val & HPFAR_EL2_NS) != 0) { |
| 1232 | r.mode |= MM_MODE_NS; |
| 1233 | } |
| 1234 | |
| 1235 | #endif |
| 1236 | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1237 | /* |
| 1238 | * Check the FnV bit, which is only valid if dfsc/ifsc is 010000. It |
| 1239 | * indicates that we cannot rely on far_el2. |
| 1240 | */ |
Karl Meakin | 5a13355 | 2024-05-30 16:06:27 +0100 | [diff] [blame] | 1241 | if (fsc == 0x10 && GET_ESR_FNV(esr)) { |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1242 | r.vaddr = va_init(0); |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 1243 | r.ipaddr = ipa_init(hpfar_el2_fipa); |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1244 | } else { |
| 1245 | r.vaddr = va_init(read_msr(far_el2)); |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 1246 | r.ipaddr = ipa_init(hpfar_el2_fipa | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1247 | (read_msr(far_el2) & (PAGE_SIZE - 1))); |
| 1248 | } |
| 1249 | |
| 1250 | return r; |
| 1251 | } |
| 1252 | |
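| | /** |
| | * Handles synchronous exceptions from a lower EL, dispatching on the |
| | * exception class. Returns the vCPU to run next, or NULL to resume the |
| | * current vCPU. |
| | */ |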
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 1253 | struct vcpu *sync_lower_exception(uintreg_t esr, uintreg_t far) |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1254 | { |
Wedson Almeida Filho | 00df6c7 | 2018-10-18 11:19:24 +0100 | [diff] [blame] | 1255 | struct vcpu *vcpu = current(); |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1256 | struct vcpu_fault_info info; |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1257 | struct vcpu *new_vcpu = NULL; |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1258 | uintreg_t ec = GET_ESR_EC(esr); |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1259 | bool is_el0_partition = vcpu->vm->el0_partition; |
Raghu Krishnamurthy | f16b2ce | 2021-11-02 07:48:38 -0700 | [diff] [blame] | 1260 | bool resume = false; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1261 | |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1262 | switch (ec) { |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1263 | case EC_WFI_WFE: |
Andrew Walbran | 48196eb | 2019-03-04 14:56:24 +0000 | [diff] [blame] | 1264 | /* Skip the instruction. */ |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1265 | vcpu->regs.pc += GET_NEXT_PC_INC(esr); |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1266 | |
| 1267 | /* |
| 1268 | * For EL0 partitions, treat both WFI and WFE the same way so |
| 1269 | * that FFA_RUN can be called on the partition to resume it. If |
| 1270 | * we treat WFI using api_wait_for_interrupt, the VCPU will be |
| 1271 | * in blocked waiting for interrupt but we cannot inject |
| 1272 | * interrupts into EL0 partitions. |
| 1273 | */ |
| 1274 | if (is_el0_partition) { |
Madhukar Pappireddy | 184501c | 2023-05-23 17:24:06 -0500 | [diff] [blame] | 1275 | api_yield(vcpu, &new_vcpu, NULL); |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1276 | return new_vcpu; |
| 1277 | } |
| 1278 | |
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 1279 | /* Check TI bit of ISS, 0 = WFI, 1 = WFE. */ |
Andrew Scull | 7364a8e | 2018-07-19 15:39:29 +0100 | [diff] [blame] | 1280 | if (esr & 1) { |
Andrew Walbran | 48196eb | 2019-03-04 14:56:24 +0000 | [diff] [blame] | 1281 | /* WFE */ |
| 1282 | /* |
| 1283 | * TODO: consider giving the scheduler more context, |
| 1284 | * somehow. |
| 1285 | */ |
Madhukar Pappireddy | 184501c | 2023-05-23 17:24:06 -0500 | [diff] [blame] | 1286 | api_yield(vcpu, &new_vcpu, NULL); |
Jose Marinho | 135dff3 | 2019-02-28 10:25:57 +0000 | [diff] [blame] | 1287 | return new_vcpu; |
Andrew Scull | 7364a8e | 2018-07-19 15:39:29 +0100 | [diff] [blame] | 1288 | } |
Andrew Walbran | 48196eb | 2019-03-04 14:56:24 +0000 | [diff] [blame] | 1289 | /* WFI */ |
Andrew Scull | 9726c25 | 2019-01-23 13:44:19 +0000 | [diff] [blame] | 1290 | return api_wait_for_interrupt(vcpu); |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1291 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1292 | case EC_DATA_ABORT_LOWER_EL: |
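| | /* ISS bit 6 (WnR) is 1 for an abort caused by a write access. */ |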
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1293 | info = fault_info_init( |
Andrew Walbran | e52006c | 2019-10-22 18:01:28 +0100 | [diff] [blame] | 1294 | esr, vcpu, (esr & (1U << 6)) ? MM_MODE_W : MM_MODE_R); |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1295 | |
Raghu Krishnamurthy | f16b2ce | 2021-11-02 07:48:38 -0700 | [diff] [blame] | 1296 | resume = vcpu_handle_page_fault(vcpu, &info); |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1297 | if (is_el0_partition) { |
| 1298 | dlog_warning("Data abort on EL0 partition\n"); |
Raghu Krishnamurthy | f16b2ce | 2021-11-02 07:48:38 -0700 | [diff] [blame] | 1299 | /* |
| 1300 | * Abort EL0 context if we should not resume the |
| 1301 | * context, or it is an alignment fault. |
| 1302 | * vcpu_handle_page_fault() only checks the mode of the |
| 1303 | * page in an architecture-agnostic way, but alignment |
| 1304 | * faults on aarch64 can happen on a correctly mapped |
| 1305 | * page. |
| 1306 | */ |
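| | /* A DFSC of 0b100001 (0x21) is an alignment fault. */ |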
| 1307 | if (!resume || ((esr & 0x3f) == 0x21)) { |
| 1308 | return api_abort(vcpu); |
| 1309 | } |
| 1310 | } |
| 1311 | |
| 1312 | if (resume) { |
| 1313 | return NULL; |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1314 | } |
| 1315 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1316 | /* Inform the EL1 of the data abort. */ |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 1317 | inject_el1_data_abort_exception(vcpu, esr, far); |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1318 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1319 | /* Schedule the same VM to continue running. */ |
| 1320 | return NULL; |
| 1321 | |
| 1322 | case EC_INSTRUCTION_ABORT_LOWER_EL: |
Andrew Scull | d3cfaad | 2019-04-04 11:34:10 +0100 | [diff] [blame] | 1323 | info = fault_info_init(esr, vcpu, MM_MODE_X); |
Raghu Krishnamurthy | f16b2ce | 2021-11-02 07:48:38 -0700 | [diff] [blame] | 1324 | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1325 | if (vcpu_handle_page_fault(vcpu, &info)) { |
| 1326 | return NULL; |
| 1327 | } |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1328 | |
| 1329 | if (is_el0_partition) { |
| 1330 | dlog_warning("Instruction abort on EL0 partition\n"); |
| 1331 | return api_abort(vcpu); |
| 1332 | } |
| 1333 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1334 | /* Inform the EL1 of the instruction abort. */ |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 1335 | inject_el1_instruction_abort_exception(vcpu, esr, far); |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 1336 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1337 | /* Schedule the same VM to continue running. */ |
| 1338 | return NULL; |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1339 | case EC_SVC: |
| 1340 | CHECK(is_el0_partition); |
| 1341 | return hvc_handler(vcpu); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1342 | case EC_HVC: |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1343 | if (is_el0_partition) { |
| 1344 | dlog_warning("Unexpected HVC Trap on EL0 partition\n"); |
| 1345 | return api_abort(vcpu); |
| 1346 | } |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1347 | return hvc_handler(vcpu); |
| 1348 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1349 | case EC_SMC: { |
Andrew Scull | c960c03 | 2018-10-24 15:13:35 +0100 | [diff] [blame] | 1350 | uintreg_t smc_pc = vcpu->regs.pc; |
Andrew Walbran | 9dadaf2 | 2019-12-05 16:50:55 +0000 | [diff] [blame] | 1351 | struct vcpu *next = smc_handler(vcpu); |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 1352 | |
| 1353 | /* Skip the SMC instruction. */ |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1354 | vcpu->regs.pc = smc_pc + GET_NEXT_PC_INC(esr); |
Andrew Walbran | 9dadaf2 | 2019-12-05 16:50:55 +0000 | [diff] [blame] | 1355 | |
Andrew Walbran | 3364565 | 2019-04-15 12:29:31 +0100 | [diff] [blame] | 1356 | return next; |
Andrew Scull | c960c03 | 2018-10-24 15:13:35 +0100 | [diff] [blame] | 1357 | } |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 1358 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1359 | case EC_MSR: |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1360 | /* |
| 1361 | * NOTE: This should never be reached because it goes through a |
| 1362 | * separate path handled by handle_system_register_access(). |
| 1363 | */ |
| 1364 | panic("Handled by handle_system_register_access()."); |
| 1365 | |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1366 | default: |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1367 | dlog_notice( |
Karl Meakin | e8937d9 | 2024-03-19 16:04:25 +0000 | [diff] [blame] | 1368 | "Unknown lower sync exception pc=%#lx, esr=%#lx, " |
| 1369 | "ec=%#lx\n", |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1370 | vcpu->regs.pc, esr, ec); |
Andrew Scull | 9726c25 | 2019-01-23 13:44:19 +0000 | [diff] [blame] | 1371 | break; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1372 | } |
| 1373 | |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1374 | if (is_el0_partition) { |
| 1375 | return api_abort(vcpu); |
| 1376 | } |
| 1377 | |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1378 | /* |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 1379 | * The exception wasn't handled. Inject to the VM to give it chance to |
| 1380 | * handle as an unknown exception. |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1381 | */ |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1382 | inject_el1_unknown_exception(vcpu, esr); |
| 1383 | |
| 1384 | /* Schedule the same VM to continue running. */ |
| 1385 | return NULL; |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1386 | } |
| 1387 | |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1388 | /** |
Fuad Tabba | b0ef2a4 | 2019-12-19 11:19:25 +0000 | [diff] [blame] | 1389 | * Handles EC = 011000, MSR, MRS instruction traps. |
Fuad Tabba | ed294af | 2019-12-20 10:43:01 +0000 | [diff] [blame] | 1390 | * Injects an exception into the EL1 if the access is not handled; |
| | * otherwise fulfills the access and skips the trapped instruction. |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1391 | */ |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1392 | void handle_system_register_access(uintreg_t esr_el2) |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1393 | { |
| 1394 | struct vcpu *vcpu = current(); |
J-Alves | 19e20cf | 2023-08-02 12:48:55 +0100 | [diff] [blame] | 1395 | ffa_id_t vm_id = vcpu->vm->id; |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1396 | uintreg_t ec = GET_ESR_EC(esr_el2); |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1397 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1398 | CHECK(ec == EC_MSR); |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1399 | /* |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1400 | * Handle accesses to debug and performance monitor registers. |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1401 | * Inject an exception for unhandled/unsupported registers. |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1402 | */ |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1403 | if (debug_el1_is_register_access(esr_el2)) { |
| 1404 | if (!debug_el1_process_access(vcpu, vm_id, esr_el2)) { |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1405 | inject_el1_sysreg_trap_exception(vcpu, esr_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1406 | return; |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1407 | } |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1408 | } else if (perfmon_is_register_access(esr_el2)) { |
| 1409 | if (!perfmon_process_access(vcpu, vm_id, esr_el2)) { |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1410 | inject_el1_sysreg_trap_exception(vcpu, esr_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1411 | return; |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1412 | } |
Fuad Tabba | 77a4b01 | 2019-11-15 12:13:08 +0000 | [diff] [blame] | 1413 | } else if (feature_id_is_register_access(esr_el2)) { |
| 1414 | if (!feature_id_process_access(vcpu, esr_el2)) { |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1415 | inject_el1_sysreg_trap_exception(vcpu, esr_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1416 | return; |
Fuad Tabba | 77a4b01 | 2019-11-15 12:13:08 +0000 | [diff] [blame] | 1417 | } |
Madhukar Pappireddy | f684d19 | 2024-09-25 14:35:57 -0500 | [diff] [blame] | 1418 | } else if (el1_physical_timer_is_register_access(esr_el2)) { |
| 1419 | if (!el1_physical_timer_process_access(vcpu, esr_el2)) { |
| 1420 | inject_el1_sysreg_trap_exception(vcpu, esr_el2); |
| 1421 | return; |
| 1422 | } |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1423 | } else { |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1424 | inject_el1_sysreg_trap_exception(vcpu, esr_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1425 | return; |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1426 | } |
| 1427 | |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1428 | /* Instruction was fulfilled. Skip it and run the next one. */ |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1429 | vcpu->regs.pc += GET_NEXT_PC_INC(esr_el2); |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1430 | } |