/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <stdnoreturn.h>

#include "hf/arch/barriers.h"
#include "hf/arch/init.h"
#include "hf/arch/mmu.h"
#include "hf/arch/plat/smc.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/panic.h"
#include "hf/plat/interrupts.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

#include "debug_el1.h"
#include "feature_id.h"
#include "msr.h"
#include "perfmon.h"
#include "psci.h"
#include "psci_handler.h"
#include "smc.h"
#include "sysregs.h"

/**
 * Hypervisor Fault Address Register Non-Secure.
 */
#define HPFAR_EL2_NS (UINT64_C(0x1) << 63)

/**
 * Hypervisor Fault Address Register Faulting IPA.
 */
#define HPFAR_EL2_FIPA (UINT64_C(0xFFFFFFFFFF0))
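
/*
 * Note: HPFAR_EL2[43:4] holds bits [51:12] of the faulting IPA, so the
 * page-aligned address is recovered with a left shift of 8, e.g.:
 *
 *	ipa = (read_msr(hpfar_el2) & HPFAR_EL2_FIPA) << 8;
 *
 * as done in fault_info_init() below.
 */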

/**
 * Gets the value to increment for the next PC.
 * The ESR encodes whether the instruction is 2 bytes or 4 bytes long.
 */
#define GET_NEXT_PC_INC(esr) (GET_ESR_IL(esr) ? 4 : 2)
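
/*
 * For example, a trap handler that emulates an instruction would typically
 * skip past it with:
 *
 *	vcpu->regs.pc += GET_NEXT_PC_INC(read_msr(esr_el2));
 */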

/**
 * The Client ID field within X7 for an SMC64 call.
 */
#define CLIENT_ID_MASK UINT64_C(0xffff)
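
/*
 * Per SMCCC, W7 may carry a Client ID in bits [15:0] and a Secure OS ID in
 * bits [31:16]; smc_forwarder() below overwrites only the Client ID part.
 */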

/**
 * Returns a reference to the currently executing vCPU.
 */
static struct vcpu *current(void)
{
	return (struct vcpu *)read_msr(tpidr_el2);
}

/**
 * Saves the state of per-vCPU peripherals, such as the virtual timer, and
 * informs the arch-independent sections that registers have been saved.
 */
void complete_saving_state(struct vcpu *vcpu)
{
	if (has_vhe_support()) {
		vcpu->regs.peripherals.cntv_cval_el0 =
			read_msr(MSR_CNTV_CVAL_EL02);
		vcpu->regs.peripherals.cntv_ctl_el0 =
			read_msr(MSR_CNTV_CTL_EL02);
	} else {
		vcpu->regs.peripherals.cntv_cval_el0 = read_msr(cntv_cval_el0);
		vcpu->regs.peripherals.cntv_ctl_el0 = read_msr(cntv_ctl_el0);
	}

	api_regs_state_saved(vcpu);

	/*
	 * If switching away from the primary, copy the current EL0 virtual
	 * timer registers to the corresponding EL2 physical timer registers.
	 * This is used to emulate the virtual timer for the primary in case it
	 * should fire while the secondary is running.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * Clear timer control register before copying compare value,
		 * to avoid a spurious timer interrupt. This could be a
		 * problem if the interrupt is configured as edge-triggered,
		 * as it would then be latched in.
		 */
		write_msr(cnthp_ctl_el2, 0);

		if (has_vhe_support()) {
			write_msr(cnthp_cval_el2, read_msr(MSR_CNTV_CVAL_EL02));
			write_msr(cnthp_ctl_el2, read_msr(MSR_CNTV_CTL_EL02));
		} else {
			write_msr(cnthp_cval_el2, read_msr(cntv_cval_el0));
			write_msr(cnthp_ctl_el2, read_msr(cntv_ctl_el0));
		}
	}
}

/**
 * Restores the state of per-vCPU peripherals, such as the virtual timer.
 */
void begin_restoring_state(struct vcpu *vcpu)
{
	/*
	 * Clear timer control register before restoring compare value, to
	 * avoid a spurious timer interrupt. This could be a problem if the
	 * interrupt is configured as edge-triggered, as it would then be
	 * latched in.
	 */
	if (has_vhe_support()) {
		write_msr(MSR_CNTV_CTL_EL02, 0);
		write_msr(MSR_CNTV_CVAL_EL02,
			  vcpu->regs.peripherals.cntv_cval_el0);
		write_msr(MSR_CNTV_CTL_EL02,
			  vcpu->regs.peripherals.cntv_ctl_el0);
	} else {
		write_msr(cntv_ctl_el0, 0);
		write_msr(cntv_cval_el0, vcpu->regs.peripherals.cntv_cval_el0);
		write_msr(cntv_ctl_el0, vcpu->regs.peripherals.cntv_ctl_el0);
	}

	/*
	 * If we are switching (back) to the primary, disable the EL2 physical
	 * timer which was being used to emulate the EL0 virtual timer, as the
	 * virtual timer is now running for the primary again.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		write_msr(cnthp_ctl_el2, 0);
		write_msr(cnthp_cval_el2, 0);
	}
}

/**
 * Invalidates all stage 1 TLB entries on the current (physical) CPU for the
 * current VMID.
 */
static void invalidate_vm_tlb(void)
{
	/*
	 * Ensure that the last VTTBR write has taken effect so we invalidate
	 * the right set of TLB entries.
	 */
	isb();

	__asm__ volatile("tlbi vmalle1");

	/*
	 * Ensure that no instructions are fetched for the VM until after the
	 * TLB invalidation has taken effect.
	 */
	isb();

	/*
	 * Ensure that no data reads or writes for the VM happen until after
	 * the TLB invalidation has taken effect. Non-shareable is enough
	 * because the TLB is local to the CPU.
	 */
	dsb(nsh);
}

/**
 * Invalidates the TLB if a different vCPU is being run than the last vCPU of
 * the same VM which was run on the current pCPU.
 *
 * This is necessary because VMs may (contrary to the architecture
 * specification) use inconsistent ASIDs across vCPUs. c.f. KVM's similar
 * workaround:
 * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94d0e5980d6791b9
 */
void maybe_invalidate_tlb(struct vcpu *vcpu)
{
	size_t current_cpu_index = cpu_index(vcpu->cpu);
	ffa_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);

	if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
	    new_vcpu_index) {
		/*
		 * The vCPU has changed since the last time this VM was run on
		 * this pCPU, so we need to invalidate the TLB.
		 */
		invalidate_vm_tlb();

		/* Record the fact that this vCPU is now running on this CPU. */
		vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] =
			new_vcpu_index;
	}
}

noreturn void irq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("IRQ from current exception level.");
}

noreturn void fiq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("FIQ from current exception level.");
}

noreturn void serr_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("SError from current exception level.");
}

noreturn void sync_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	uintreg_t esr = read_msr(esr_el2);
	uintreg_t ec = GET_ESR_EC(esr);

	(void)spsr;

	switch (ec) {
	case EC_DATA_ABORT_SAME_EL:
		if (!(esr & (1U << 10))) { /* Check FnV bit. */
			dlog_error(
				"Data abort: pc=%#x, esr=%#x, ec=%#x, "
				"far=%#x\n",
				elr, esr, ec, read_msr(far_el2));
		} else {
			dlog_error(
				"Data abort: pc=%#x, esr=%#x, ec=%#x, "
				"far=invalid\n",
				elr, esr, ec);
		}

		break;

	default:
		dlog_error(
			"Unknown current sync exception pc=%#x, esr=%#x, "
			"ec=%#x\n",
			elr, esr, ec);
		break;
	}

	panic("EL2 exception");
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_irq(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->hcr_el2 |= HCR_EL2_VI;
	} else {
		r->hcr_el2 &= ~HCR_EL2_VI;
	}
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register.
 */
static void set_virtual_irq_current(bool enable)
{
	uintreg_t hcr_el2 = current()->regs.hcr_el2;

	if (enable) {
		hcr_el2 |= HCR_EL2_VI;
	} else {
		hcr_el2 &= ~HCR_EL2_VI;
	}
	current()->regs.hcr_el2 = hcr_el2;
}

/**
 * Sets or clears the VF bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_fiq(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->hcr_el2 |= HCR_EL2_VF;
	} else {
		r->hcr_el2 &= ~HCR_EL2_VF;
	}
}

/**
 * Sets or clears the VF bit in the HCR_EL2 register.
 */
static void set_virtual_fiq_current(bool enable)
{
	uintreg_t hcr_el2 = current()->regs.hcr_el2;

	if (enable) {
		hcr_el2 |= HCR_EL2_VF;
	} else {
		hcr_el2 &= ~HCR_EL2_VF;
	}
	current()->regs.hcr_el2 = hcr_el2;
}
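
/*
 * Note: with HCR_EL2.VI/VF set, the hardware presents a virtual IRQ/FIQ to
 * the vCPU the next time it runs at EL1; vcpu_update_virtual_interrupts()
 * below derives these bits from the pending interrupt counts.
 */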

#if SECURE_WORLD == 1

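/**
 * Advances the secure partition boot sequence: on an SP's first wait, marks
 * its VM initialized and, when another VM remains in the boot list, sets up
 * that VM's vCPU in *next and returns true. Returns false otherwise.
 */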
static bool sp_boot_next(struct vcpu *current, struct vcpu **next,
			 struct ffa_value *ffa_ret)
{
	struct vm_locked current_vm_locked;
	struct vm *vm_next = NULL;
	bool ret = false;

	/*
	 * If the VM hasn't been initialized, initialize it and traverse the
	 * boot list following the "next_boot" field in the VM structure.
	 * Once all the SPs have been booted (when "next_boot" is NULL),
	 * return execution to the NWd.
	 */
	current_vm_locked = vm_lock(current->vm);
	if (current_vm_locked.vm->initialized == false) {
		current_vm_locked.vm->initialized = true;
		dlog_verbose("Initialized VM: %#x, boot_order: %u\n",
			     current_vm_locked.vm->id,
			     current_vm_locked.vm->boot_order);

		if (current_vm_locked.vm->next_boot != NULL) {
			current->state = VCPU_STATE_BLOCKED_MAILBOX;
			vm_next = current_vm_locked.vm->next_boot;
			CHECK(vm_next->initialized == false);
			*next = vm_get_vcpu(vm_next, vcpu_index(current));
			arch_regs_reset(*next);
			(*next)->cpu = current->cpu;
			(*next)->state = VCPU_STATE_RUNNING;
			(*next)->regs_available = false;

			*ffa_ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
			ret = true;
			goto out;
		}

		dlog_verbose("Finished initializing all VMs.\n");
	}

out:
	vm_unlock(&current_vm_locked);
	return ret;
}

/**
 * Handles special direct messages from the SPMD to the SPMC. For now these
 * relate to power management only.
 */
static bool spmd_handler(struct ffa_value *args, struct vcpu *current)
{
	ffa_vm_id_t sender = ffa_sender(*args);
	ffa_vm_id_t receiver = ffa_receiver(*args);
	ffa_vm_id_t current_vm_id = current->vm->id;

	/*
	 * Check if the direct message request is originating from the SPMD and
	 * directed to the SPMC.
	 */
	if (!(sender == HF_SPMD_VM_ID && receiver == HF_SPMC_VM_ID &&
	      current_vm_id == HF_OTHER_WORLD_ID)) {
		return false;
	}

	switch (args->arg3) {
	case PSCI_CPU_OFF: {
		struct vm *vm = vm_get_first_boot();
		struct vcpu *vcpu = vm_get_vcpu(vm, vcpu_index(current));

		/*
		 * TODO: the PM event reached the SPMC. In a later iteration,
		 * the PM event can be passed to the SP by resuming it.
		 */
		*args = (struct ffa_value){
			.func = FFA_MSG_SEND_DIRECT_RESP_32,
			.arg1 = ((uint64_t)HF_SPMC_VM_ID << 16) | HF_SPMD_VM_ID,
			.arg2 = 0U};

		dlog_verbose("%s cpu off notification cpuid %#x\n", __func__,
			     vcpu->cpu->id);
		cpu_off(vcpu->cpu);
		break;
	}
	default:
		dlog_verbose("%s message not handled %#x\n", __func__,
			     args->arg3);
		return false;
	}

	return true;
}

#endif

/**
 * Checks whether to block an SMC being forwarded from a VM.
 */
static bool smc_is_blocked(const struct vm *vm, uint32_t func)
{
	bool block_by_default = !vm->smc_whitelist.permissive;

	for (size_t i = 0; i < vm->smc_whitelist.smc_count; ++i) {
		if (func == vm->smc_whitelist.smcs[i]) {
			return false;
		}
	}

	dlog_notice("SMC %#010x attempted from VM %#x, blocked=%u\n", func,
		    vm->id, block_by_default);

	/* Access is still allowed in permissive mode. */
	return block_by_default;
}

/**
 * Applies SMC access control according to the manifest and forwards the call
 * if access is granted.
 */
static void smc_forwarder(const struct vm *vm, struct ffa_value *args)
{
	struct ffa_value ret;
	uint32_t client_id = vm->id;
	uintreg_t arg7 = args->arg7;

	if (smc_is_blocked(vm, args->func)) {
		args->func = SMCCC_ERROR_UNKNOWN;
		return;
	}

	/*
	 * Set the Client ID but keep the existing Secure OS ID and anything
	 * else (currently unspecified) that the client may have passed in the
	 * upper bits.
	 */
	args->arg7 = client_id | (arg7 & ~CLIENT_ID_MASK);
	ret = smc_forward(args->func, args->arg1, args->arg2, args->arg3,
			  args->arg4, args->arg5, args->arg6, args->arg7);

	/*
	 * Preserve the value passed by the caller, rather than the generated
	 * client_id. Note that this would also overwrite any return value that
	 * may be in x7, but the SMCs that we are forwarding are legacy calls
	 * from before SMCCC 1.2 so won't have more than 4 return values anyway.
	 */
	ret.arg7 = arg7;

	plat_smc_post_forward(*args, &ret);

	*args = ret;
}
| 462 | |
Olivier Deprez | f33a6c7 | 2020-06-09 18:28:45 +0200 | [diff] [blame] | 463 | /** |
| 464 | * In the normal world, ffa_handler is always called from the virtual FF-A |
Andrew Walbran | 8e8bf3f | 2020-10-07 17:58:20 +0100 | [diff] [blame] | 465 | * instance (from a VM in EL1). In the secure world, ffa_handler may be called |
| 466 | * from the virtual (a secure partition in S-EL1) or physical FF-A instance |
| 467 | * (from the normal world via EL3). The function returns true when the call is |
| 468 | * handled. The *next pointer is updated to the next vCPU to run, which might be |
| 469 | * the 'other world' vCPU if the call originated from the virtual FF-A instance |
| 470 | * and has to be forwarded down to EL3, or left as is to resume the current |
| 471 | * vCPU. |
Olivier Deprez | f33a6c7 | 2020-06-09 18:28:45 +0200 | [diff] [blame] | 472 | */ |
| 473 | static bool ffa_handler(struct ffa_value *args, struct vcpu *current, |
| 474 | struct vcpu **next) |
Andrew Walbran | 7d28d9a | 2019-08-30 16:24:58 +0100 | [diff] [blame] | 475 | { |
J-Alves | bc3de8b | 2020-12-07 14:32:04 +0000 | [diff] [blame] | 476 | uint32_t func = args->func; |
Andrew Walbran | e7ad3c0 | 2019-12-24 17:03:04 +0000 | [diff] [blame] | 477 | |
	/*
	 * NOTE: When adding new methods to this handler, update
	 * api_ffa_features accordingly.
	 */
	switch (func) {
	case FFA_VERSION_32:
		*args = api_ffa_version(args->arg1);
		return true;
	case FFA_PARTITION_INFO_GET_32: {
		struct ffa_uuid uuid;

		ffa_uuid_init(args->arg1, args->arg2, args->arg3, args->arg4,
			      &uuid);
		*args = api_ffa_partition_info_get(current, &uuid);
		return true;
	}
	case FFA_ID_GET_32:
		*args = api_ffa_id_get(current);
		return true;
	case FFA_SPM_ID_GET_32:
		*args = api_ffa_spm_id_get();
		return true;
	case FFA_FEATURES_32:
		*args = api_ffa_features(args->arg1);
		return true;
	case FFA_RX_RELEASE_32:
		*args = api_ffa_rx_release(current, next);
		return true;
	case FFA_RXTX_MAP_64:
		*args = api_ffa_rxtx_map(ipa_init(args->arg1),
					 ipa_init(args->arg2), args->arg3,
					 current, next);
		return true;
	case FFA_YIELD_32:
		*args = api_yield(current, next);
		return true;
	case FFA_MSG_SEND_32:
		*args = api_ffa_msg_send(ffa_sender(*args), ffa_receiver(*args),
					 ffa_msg_send_size(*args),
					 ffa_msg_send_attributes(*args),
					 current, next);
		return true;
	case FFA_MSG_WAIT_32:
#if SECURE_WORLD == 1
		if (sp_boot_next(current, next, args)) {
			return true;
		}
#endif
		*args = api_ffa_msg_recv(true, current, next);
		return true;
	case FFA_MSG_POLL_32:
		*args = api_ffa_msg_recv(false, current, next);
		return true;
	case FFA_RUN_32:
		*args = api_ffa_run(ffa_vm_id(*args), ffa_vcpu_index(*args),
				    current, next);
		return true;
	case FFA_MEM_DONATE_32:
	case FFA_MEM_LEND_32:
	case FFA_MEM_SHARE_32:
		*args = api_ffa_mem_send(func, args->arg1, args->arg2,
					 ipa_init(args->arg3), args->arg4,
					 current);
		return true;
	case FFA_MEM_RETRIEVE_REQ_32:
		*args = api_ffa_mem_retrieve_req(args->arg1, args->arg2,
						 ipa_init(args->arg3),
						 args->arg4, current);
		return true;
	case FFA_MEM_RELINQUISH_32:
		*args = api_ffa_mem_relinquish(current);
		return true;
	case FFA_MEM_RECLAIM_32:
		*args = api_ffa_mem_reclaim(
			ffa_assemble_handle(args->arg1, args->arg2), args->arg3,
			current);
		return true;
	case FFA_MEM_FRAG_RX_32:
		*args = api_ffa_mem_frag_rx(ffa_frag_handle(*args), args->arg3,
					    (args->arg4 >> 16) & 0xffff,
					    current);
		return true;
	case FFA_MEM_FRAG_TX_32:
		*args = api_ffa_mem_frag_tx(ffa_frag_handle(*args), args->arg3,
					    (args->arg4 >> 16) & 0xffff,
					    current);
		return true;
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32: {
#if SECURE_WORLD == 1
		if (spmd_handler(args, current)) {
			return true;
		}
#endif
		*args = api_ffa_msg_send_direct_req(ffa_sender(*args),
						    ffa_receiver(*args), *args,
						    current, next);
		return true;
	}
	case FFA_MSG_SEND_DIRECT_RESP_64:
	case FFA_MSG_SEND_DIRECT_RESP_32:
		*args = api_ffa_msg_send_direct_resp(ffa_sender(*args),
						     ffa_receiver(*args), *args,
						     current, next);
		return true;
	case FFA_SECONDARY_EP_REGISTER_64:
		*args = api_ffa_secondary_ep_register(ipa_init(args->arg1),
						      current);
		return true;
	}

	return false;
}

/**
 * Sets or clears the VI/VF bits according to pending interrupts.
 */
static void vcpu_update_virtual_interrupts(struct vcpu *next)
{
	struct vcpu_locked vcpu_locked;

	if (next == NULL) {
		/*
		 * Not switching vCPUs, set the bit for the current vCPU
		 * directly in the register.
		 */

		vcpu_locked = vcpu_lock(current());
		set_virtual_irq_current(
			vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
		set_virtual_fiq_current(
			vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
		vcpu_unlock(&vcpu_locked);
	} else if (vm_id_is_current_world(next->vm->id)) {
		/*
		 * About to switch vCPUs, set the bit for the vCPU to which we
		 * are switching in the saved copy of the register.
		 */

		vcpu_locked = vcpu_lock(next);
		set_virtual_irq(&next->regs,
				vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
		set_virtual_fiq(&next->regs,
				vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
		vcpu_unlock(&vcpu_locked);
	}
}

/**
 * Handles PSCI and FF-A calls and writes the return value back to the
 * registers of the vCPU. This is shared between smc_handler and hvc_handler.
 *
 * Returns true if the call was handled.
 */
static bool hvc_smc_handler(struct ffa_value args, struct vcpu *vcpu,
			    struct vcpu **next)
{
	/* Do not expect PSCI calls emitted from within the secure world. */
#if SECURE_WORLD == 0
	if (psci_handler(vcpu, args.func, args.arg1, args.arg2, args.arg3,
			 &vcpu->regs.r[0], next)) {
		return true;
	}
#endif

	if (ffa_handler(&args, vcpu, next)) {
		arch_regs_set_retval(&vcpu->regs, args);
		vcpu_update_virtual_interrupts(*next);
		return true;
	}

	return false;
}

/**
 * Processes SMC instruction calls.
 */
static struct vcpu *smc_handler(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	switch (args.func & ~SMCCC_CONVENTION_MASK) {
	case HF_DEBUG_LOG:
		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
		return NULL;
	}

	smc_forwarder(vcpu->vm, &args);
	arch_regs_set_retval(&vcpu->regs, args);
	return NULL;
}

#if SECURE_WORLD == 1

/**
 * Called from the other_world_loop on return from SMC.
 * Processes SMC calls originating from the NWd.
 */
struct vcpu *smc_handler_from_nwd(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	/*
	 * If the SMC emitted by the normal world is not handled in the secure
	 * world, return an error stating that the ABI is not supported. Only
	 * FF-A calls are supported. We cannot return SMCCC_ERROR_UNKNOWN
	 * directly because the SPMD SMC handler would not recognize it as a
	 * standard FF-A call returning from the SPMC.
	 */
	arch_regs_set_retval(&vcpu->regs, ffa_error(FFA_NOT_SUPPORTED));

	return NULL;
}

#endif

/*
 * Exception vector offsets.
 * See Arm Architecture Reference Manual Armv8-A, D1.10.2.
 */

/**
 * Offset for synchronous exceptions at current EL with SPx.
 */
#define OFFSET_CURRENT_SPX UINT64_C(0x200)

/**
 * Offset for synchronous exceptions at lower EL using AArch64.
 */
#define OFFSET_LOWER_EL_64 UINT64_C(0x400)

/**
 * Offset for synchronous exceptions at lower EL using AArch32.
 */
#define OFFSET_LOWER_EL_32 UINT64_C(0x600)
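
/*
 * The vector table entry used is vbar + offset, where the offset encodes the
 * exception source (current EL with SPx, lower EL using AArch64 or AArch32);
 * only the synchronous entries are relevant here.
 */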

/**
 * Returns the address for the exception handler at EL1.
 */
static uintreg_t get_el1_exception_handler_addr(const struct vcpu *vcpu)
{
	uintreg_t base_addr = has_vhe_support() ? read_msr(MSR_VBAR_EL12)
						: read_msr(vbar_el1);
	uintreg_t pe_mode = vcpu->regs.spsr & PSR_PE_MODE_MASK;
	bool is_arch32 = vcpu->regs.spsr & PSR_ARCH_MODE_32;

	if (pe_mode == PSR_PE_MODE_EL0T) {
		if (is_arch32) {
			base_addr += OFFSET_LOWER_EL_32;
		} else {
			base_addr += OFFSET_LOWER_EL_64;
		}
	} else {
		CHECK(!is_arch32);
		base_addr += OFFSET_CURRENT_SPX;
	}

	return base_addr;
}

/**
 * Injects an exception with the specified Exception Syndrome Register value
 * into EL1.
 *
 * NOTE: This function assumes that the lazy registers haven't been saved, and
 * writes to the lazy registers of the CPU directly instead of the vCPU.
 */
static void inject_el1_exception(struct vcpu *vcpu, uintreg_t esr_el1_value,
				 uintreg_t far_el1_value)
{
	uintreg_t handler_address = get_el1_exception_handler_addr(vcpu);

	/* Update the CPU state to inject the exception. */
	if (has_vhe_support()) {
		write_msr(MSR_ESR_EL12, esr_el1_value);
		write_msr(MSR_FAR_EL12, far_el1_value);
		write_msr(MSR_ELR_EL12, vcpu->regs.pc);
		write_msr(MSR_SPSR_EL12, vcpu->regs.spsr);
	} else {
		write_msr(esr_el1, esr_el1_value);
		write_msr(far_el1, far_el1_value);
		write_msr(elr_el1, vcpu->regs.pc);
		write_msr(spsr_el1, vcpu->regs.spsr);
	}

	/*
	 * Mask (disable) interrupts and run in EL1h mode.
	 * EL1h mode is used because, by default, taking an exception selects
	 * the stack pointer for the target Exception level. The software can
	 * change that later in the handler if needed.
	 */
	vcpu->regs.spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;

	/* Transfer control to the exception handler. */
	vcpu->regs.pc = handler_address;
}

/**
 * Injects a Data Abort exception (same exception level).
 */
static void inject_el1_data_abort_exception(struct vcpu *vcpu,
					    uintreg_t esr_el2,
					    uintreg_t far_el2)
{
	/*
	 * ISS encoding remains the same, but the EC is changed to reflect
	 * where the exception came from.
	 * See Arm Architecture Reference Manual Armv8-A, pages D13-2943/2982.
	 */
	uintreg_t esr_el1_value = GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) |
				  (EC_DATA_ABORT_SAME_EL << ESR_EC_OFFSET);

	dlog_notice("Injecting Data Abort exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el2);
}

/**
 * Injects an Instruction Abort exception (same exception level).
 */
static void inject_el1_instruction_abort_exception(struct vcpu *vcpu,
						   uintreg_t esr_el2,
						   uintreg_t far_el2)
{
	/*
	 * ISS encoding remains the same, but the EC is changed to reflect
	 * where the exception came from.
	 * See Arm Architecture Reference Manual Armv8-A, pages D13-2941/2980.
	 */
	uintreg_t esr_el1_value =
		GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) |
		(EC_INSTRUCTION_ABORT_SAME_EL << ESR_EC_OFFSET);

	dlog_notice("Injecting Instruction Abort exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el2);
}

/**
 * Injects an exception with an unknown reason into EL1.
 */
static void inject_el1_unknown_exception(struct vcpu *vcpu, uintreg_t esr_el2)
{
	uintreg_t esr_el1_value =
		GET_ESR_IL(esr_el2) | (EC_UNKNOWN << ESR_EC_OFFSET);

	/*
	 * The value of the far_el2 register is UNKNOWN in this case,
	 * therefore, don't propagate it to avoid leaking sensitive information.
	 */
	uintreg_t far_el1_value = 0;
	char *direction_str;

	direction_str = ISS_IS_READ(esr_el2) ? "read" : "write";
	dlog_notice(
		"Trapped access to system register %s: op0=%d, op1=%d, crn=%d, "
		"crm=%d, op2=%d, rt=%d.\n",
		direction_str, GET_ISS_OP0(esr_el2), GET_ISS_OP1(esr_el2),
		GET_ISS_CRN(esr_el2), GET_ISS_CRM(esr_el2),
		GET_ISS_OP2(esr_el2), GET_ISS_RT(esr_el2));

	dlog_notice("Injecting Unknown Reason exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el1_value);
}

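/**
 * Processes HVC instruction calls.
 */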
static struct vcpu *hvc_handler(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	switch (args.func) {
	case HF_MAILBOX_WRITABLE_GET:
		vcpu->regs.r[0] = api_mailbox_writable_get(vcpu);
		break;

	case HF_MAILBOX_WAITER_GET:
		vcpu->regs.r[0] = api_mailbox_waiter_get(args.arg1, vcpu);
		break;

	case HF_INTERRUPT_ENABLE:
		vcpu->regs.r[0] = api_interrupt_enable(args.arg1, args.arg2,
						       args.arg3, vcpu);
		break;

	case HF_INTERRUPT_GET:
		vcpu->regs.r[0] = api_interrupt_get(vcpu);
		break;

	case HF_INTERRUPT_INJECT:
		vcpu->regs.r[0] = api_interrupt_inject(args.arg1, args.arg2,
						       args.arg3, vcpu, &next);
		break;

	case HF_DEBUG_LOG:
		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
		break;

	default:
		vcpu->regs.r[0] = SMCCC_ERROR_UNKNOWN;
	}

	vcpu_update_virtual_interrupts(next);

	return next;
}

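/**
 * Processes IRQs taken while a lower EL was running.
 */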
struct vcpu *irq_lower(void)
{
	/*
	 * Switch back to primary VM, interrupts will be handled there.
	 *
	 * If the VM has aborted, this vCPU will be aborted when the scheduler
	 * tries to run it again. This means the interrupt will not be delayed
	 * by the aborted VM.
	 *
	 * TODO: Only switch when the interrupt isn't for the current VM.
	 */
	return api_preempt(current());
}

struct vcpu *fiq_lower(void)
{
#if SECURE_WORLD == 1
	struct vcpu_locked current_locked;
	struct vcpu *current_vcpu = current();
	int ret;

	if (current_vcpu->vm->supports_managed_exit) {
		/* Mask all interrupts. */
		plat_interrupts_set_priority_mask(0x0);

		current_locked = vcpu_lock(current_vcpu);
		ret = api_interrupt_inject_locked(current_locked,
						  HF_MANAGED_EXIT_INTID,
						  current_vcpu, NULL);
		if (ret != 0) {
			panic("Failed to inject managed exit interrupt\n");
		}

		/* Entering managed exit sequence. */
		current_vcpu->processing_managed_exit = true;

		vcpu_unlock(&current_locked);

		/*
		 * Since we are in interrupt context, set the bit for the
		 * current vCPU directly in the register.
		 */
		vcpu_update_virtual_interrupts(NULL);

		/* Resume current vCPU. */
		return NULL;
	}

| 950 | /* |
| 951 | * The SP does not support managed exit, so it is preempted and |
| 952 | * execution is handed back to the normal world through the |
| 953 | * FFA_INTERRUPT ABI. The SP can be resumed later via FFA_RUN. For the |
| 954 | * current vCPU, the irq_lower()/api_preempt() path below is |
| 955 | * equivalent to calling api_switch_to_other_world() with FFA_INTERRUPT_32. |
| 956 | */ |
| 957 | #endif |
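| | /* |
| | * To summarise the secure-world policy above as pseudocode: |
| | * |
| | *     if (vm->supports_managed_exit) |
| | *             inject HF_MANAGED_EXIT_INTID and resume the same vCPU; |
| | *     else |
| | *             preempt the SP and return to the normal world with |
| | *             FFA_INTERRUPT, to be resumed later via FFA_RUN; |
| | */ |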
| 958 | |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 959 | return irq_lower(); |
| 960 | } |
| 961 | |
Fuad Tabba | d1d6798 | 2020-01-08 11:28:29 +0000 | [diff] [blame] | 962 | noreturn struct vcpu *serr_lower(void) |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 963 | { |
Fuad Tabba | d1d6798 | 2020-01-08 11:28:29 +0000 | [diff] [blame] | 964 | /* |
| 965 | * SError exceptions should be isolated and handled by the responsible |
| 966 | * VM/exception level. Getting here indicates either a bug (isolation |
| 967 | * is not working) or a processor without ARMv8.2-IESB support, in |
| 968 | * which case Hafnium routes SError exceptions to EL2 (here). |
| 969 | */ |
| 970 | panic("SError from a lower exception level."); |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 971 | } |
| 972 | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 973 | /** |
| 974 | * Initialises a fault info structure. It assumes that an FnV bit exists at |
| 975 | * bit offset 10 of the ESR, and that it is only valid when the bottom 6 bits of |
| 976 | * the ESR (the fault status code) are 0b010000; this is the case for both |
| 977 | * instruction and data aborts, but not necessarily for other exception reasons. |
| 978 | */ |
| 979 | static struct vcpu_fault_info fault_info_init(uintreg_t esr, |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 980 | const struct vcpu *vcpu, |
| 981 | uint32_t mode) |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 982 | { |
| 983 | uint32_t fsc = esr & 0x3f; |
| 984 | struct vcpu_fault_info r; |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 985 | uint64_t hpfar_el2_val; |
| 986 | uint64_t hpfar_el2_fipa; |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 987 | |
| 988 | r.mode = mode; |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 989 | r.pc = va_init(vcpu->regs.pc); |
| 990 | |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 991 | /* Get Hypervisor IPA Fault Address value. */ |
| 992 | hpfar_el2_val = read_msr(hpfar_el2); |
| 993 | |
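| | /* |
| | * Per the Arm ARM, HPFAR_EL2.FIPA occupies register bits [43:4] and |
| | * holds bits [51:12] of the faulting IPA (its page number). Masking |
| | * with HPFAR_EL2_FIPA therefore leaves FIPA << 4, and the further |
| | * shift left by 8 below reconstructs the page-aligned IPA (FIPA << 12). |
| | */ |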
| 994 | /* Extract Faulting IPA. */ |
| 995 | hpfar_el2_fipa = (hpfar_el2_val & HPFAR_EL2_FIPA) << 8; |
| 996 | |
| 997 | #if SECURE_WORLD == 1 |
| 998 | |
| 999 | /* |
| 1000 | * Determine if the faulting IPA targets NS space. |
| 1001 | * At NS-EL2, hpfar_el2 bit 63 is RES0. At S-EL2, this bit determines if |
| 1002 | * the faulting Stage-1 address output is a secure or non-secure IPA. |
| 1003 | */ |
| 1004 | if ((hpfar_el2_val & HPFAR_EL2_NS) != 0) { |
| 1005 | r.mode |= MM_MODE_NS; |
| 1006 | } |
| 1007 | |
| 1008 | #endif |
| 1009 | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1010 | /* |
| 1011 | * Check the FnV bit, which is only valid if dfsc/ifsc is 0b010000. It |
| 1012 | * indicates that we cannot rely on far_el2. |
| 1013 | */ |
Andrew Walbran | e52006c | 2019-10-22 18:01:28 +0100 | [diff] [blame] | 1014 | if (fsc == 0x10 && esr & (1U << 10)) { |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1015 | r.vaddr = va_init(0); |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 1016 | r.ipaddr = ipa_init(hpfar_el2_fipa); |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1017 | } else { |
| 1018 | r.vaddr = va_init(read_msr(far_el2)); |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 1019 | r.ipaddr = ipa_init(hpfar_el2_fipa | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1020 | (read_msr(far_el2) & (PAGE_SIZE - 1))); |
| 1021 | } |
| 1022 | |
| 1023 | return r; |
| 1024 | } |
| 1025 | |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 1026 | struct vcpu *sync_lower_exception(uintreg_t esr, uintreg_t far) |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1027 | { |
Wedson Almeida Filho | 00df6c7 | 2018-10-18 11:19:24 +0100 | [diff] [blame] | 1028 | struct vcpu *vcpu = current(); |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1029 | struct vcpu_fault_info info; |
Jose Marinho | 135dff3 | 2019-02-28 10:25:57 +0000 | [diff] [blame] | 1030 | struct vcpu *new_vcpu; |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1031 | uintreg_t ec = GET_ESR_EC(esr); |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1032 | |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1033 | switch (ec) { |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1034 | case EC_WFI_WFE: |
Andrew Walbran | 48196eb | 2019-03-04 14:56:24 +0000 | [diff] [blame] | 1035 | /* Skip the instruction. */ |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1036 | vcpu->regs.pc += GET_NEXT_PC_INC(esr); |
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 1037 | /* Check TI bit of ISS, 0 = WFI, 1 = WFE. */ |
Andrew Scull | 7364a8e | 2018-07-19 15:39:29 +0100 | [diff] [blame] | 1038 | if (esr & 1) { |
Andrew Walbran | 48196eb | 2019-03-04 14:56:24 +0000 | [diff] [blame] | 1039 | /* WFE */ |
| 1040 | /* |
| 1041 | * TODO: consider giving the scheduler more context, |
| 1042 | * somehow. |
| 1043 | */ |
Andrew Walbran | 16075b6 | 2019-09-03 17:11:07 +0100 | [diff] [blame] | 1044 | api_yield(vcpu, &new_vcpu); |
Jose Marinho | 135dff3 | 2019-02-28 10:25:57 +0000 | [diff] [blame] | 1045 | return new_vcpu; |
Andrew Scull | 7364a8e | 2018-07-19 15:39:29 +0100 | [diff] [blame] | 1046 | } |
Andrew Walbran | 48196eb | 2019-03-04 14:56:24 +0000 | [diff] [blame] | 1047 | /* WFI */ |
Andrew Scull | 9726c25 | 2019-01-23 13:44:19 +0000 | [diff] [blame] | 1048 | return api_wait_for_interrupt(vcpu); |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1049 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1050 | case EC_DATA_ABORT_LOWER_EL: |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1051 | info = fault_info_init( |
Andrew Walbran | e52006c | 2019-10-22 18:01:28 +0100 | [diff] [blame] | 1052 | esr, vcpu, (esr & (1U << 6)) ? MM_MODE_W : MM_MODE_R); |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1053 | if (vcpu_handle_page_fault(vcpu, &info)) { |
| 1054 | return NULL; |
| 1055 | } |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1056 | /* Inform EL1 of the data abort. */ |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 1057 | inject_el1_data_abort_exception(vcpu, esr, far); |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1058 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1059 | /* Schedule the same VM to continue running. */ |
| 1060 | return NULL; |
| 1061 | |
| 1062 | case EC_INSTRUCTION_ABORT_LOWER_EL: |
Andrew Scull | d3cfaad | 2019-04-04 11:34:10 +0100 | [diff] [blame] | 1063 | info = fault_info_init(esr, vcpu, MM_MODE_X); |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1064 | if (vcpu_handle_page_fault(vcpu, &info)) { |
| 1065 | return NULL; |
| 1066 | } |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1067 | /* Inform EL1 of the instruction abort. */ |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 1068 | inject_el1_instruction_abort_exception(vcpu, esr, far); |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 1069 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1070 | /* Schedule the same VM to continue running. */ |
| 1071 | return NULL; |
| 1072 | |
| 1073 | case EC_HVC: |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1074 | return hvc_handler(vcpu); |
| 1075 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1076 | case EC_SMC: { |
Andrew Scull | c960c03 | 2018-10-24 15:13:35 +0100 | [diff] [blame] | 1077 | uintreg_t smc_pc = vcpu->regs.pc; |
Andrew Walbran | 9dadaf2 | 2019-12-05 16:50:55 +0000 | [diff] [blame] | 1078 | struct vcpu *next = smc_handler(vcpu); |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 1079 | |
| 1080 | /* Skip the SMC instruction. */ |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1081 | vcpu->regs.pc = smc_pc + GET_NEXT_PC_INC(esr); |
Andrew Walbran | 9dadaf2 | 2019-12-05 16:50:55 +0000 | [diff] [blame] | 1082 | |
Andrew Walbran | 3364565 | 2019-04-15 12:29:31 +0100 | [diff] [blame] | 1083 | return next; |
Andrew Scull | c960c03 | 2018-10-24 15:13:35 +0100 | [diff] [blame] | 1084 | } |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 1085 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1086 | case EC_MSR: |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1087 | /* |
| 1088 | * NOTE: This should never be reached: EC_MSR traps take a |
| 1089 | * separate path, handled by handle_system_register_access(). |
| 1090 | */ |
| 1091 | panic("Handled by handle_system_register_access()."); |
| 1092 | |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1093 | default: |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1094 | dlog_notice( |
| 1095 | "Unknown lower sync exception pc=%#x, esr=%#x, " |
| 1096 | "ec=%#x\n", |
| 1097 | vcpu->regs.pc, esr, ec); |
Andrew Scull | 9726c25 | 2019-01-23 13:44:19 +0000 | [diff] [blame] | 1098 | break; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1099 | } |
| 1100 | |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1101 | /* |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 1102 | * The exception wasn't handled. Inject it into the VM to give it a |
| 1103 | * chance to handle it as an unknown exception. |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1104 | */ |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1105 | inject_el1_unknown_exception(vcpu, esr); |
| 1106 | |
| 1107 | /* Schedule the same VM to continue running. */ |
| 1108 | return NULL; |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1109 | } |
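| | /* |
| | * For reference, assuming the usual ESR_ELx.EC encodings from the Arm |
| | * ARM for the constants above: EC_WFI_WFE is 0x01, EC_HVC is 0x16, |
| | * EC_SMC is 0x17, EC_MSR is 0x18, EC_INSTRUCTION_ABORT_LOWER_EL is |
| | * 0x20 and EC_DATA_ABORT_LOWER_EL is 0x24. |
| | */ |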
| 1110 | |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1111 | /** |
Fuad Tabba | b0ef2a4 | 2019-12-19 11:19:25 +0000 | [diff] [blame] | 1112 | * Handles EC = 0b011000: MSR and MRS instruction traps. Emulates the |
Fuad Tabba | ed294af | 2019-12-20 10:43:01 +0000 | [diff] [blame] | 1113 | * access where possible, otherwise injects an unknown exception into |
| | * the vCPU; the same vCPU is resumed in either case. |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1114 | */ |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1115 | void handle_system_register_access(uintreg_t esr_el2) |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1116 | { |
| 1117 | struct vcpu *vcpu = current(); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1118 | ffa_vm_id_t vm_id = vcpu->vm->id; |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1119 | uintreg_t ec = GET_ESR_EC(esr_el2); |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1120 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1121 | CHECK(ec == EC_MSR); |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1122 | /* |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1123 | * Handle accesses to debug and performance monitor registers. |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1124 | * Inject an exception for unhandled/unsupported registers. |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1125 | */ |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1126 | if (debug_el1_is_register_access(esr_el2)) { |
| 1127 | if (!debug_el1_process_access(vcpu, vm_id, esr_el2)) { |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1128 | inject_el1_unknown_exception(vcpu, esr_el2); |
| 1129 | return; |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1130 | } |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1131 | } else if (perfmon_is_register_access(esr_el2)) { |
| 1132 | if (!perfmon_process_access(vcpu, vm_id, esr_el2)) { |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1133 | inject_el1_unknown_exception(vcpu, esr_el2); |
| 1134 | return; |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1135 | } |
Fuad Tabba | 77a4b01 | 2019-11-15 12:13:08 +0000 | [diff] [blame] | 1136 | } else if (feature_id_is_register_access(esr_el2)) { |
| 1137 | if (!feature_id_process_access(vcpu, esr_el2)) { |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1138 | inject_el1_unknown_exception(vcpu, esr_el2); |
| 1139 | return; |
Fuad Tabba | 77a4b01 | 2019-11-15 12:13:08 +0000 | [diff] [blame] | 1140 | } |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1141 | } else { |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1142 | inject_el1_unknown_exception(vcpu, esr_el2); |
| 1143 | return; |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1144 | } |
| 1145 | |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1146 | /* Instruction was fulfilled. Skip it and run the next one. */ |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1147 | vcpu->regs.pc += GET_NEXT_PC_INC(esr_el2); |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1148 | } |
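| | /* |
| | * Sketch of the ISS layout that the predicates above decode for an |
| | * EC = 0b011000 trap, per the Arm ARM; the extraction below is |
| | * illustrative rather than the macros this file actually uses: |
| | * |
| | *     bool is_read = esr_el2 & 1;            (1 = MRS, 0 = MSR) |
| | *     uint32_t crm = (esr_el2 >> 1) & 0xf; |
| | *     uint32_t rt = (esr_el2 >> 5) & 0x1f;   (transferred register) |
| | *     uint32_t crn = (esr_el2 >> 10) & 0xf; |
| | *     uint32_t op1 = (esr_el2 >> 14) & 0x7; |
| | *     uint32_t op2 = (esr_el2 >> 17) & 0x7; |
| | *     uint32_t op0 = (esr_el2 >> 20) & 0x3; |
| | * |
| | * debug_el1, perfmon and feature_id match these op0/op1/CRn/CRm/op2 |
| | * values against the system registers they emulate. |
| | */ |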