/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <stdnoreturn.h>

#include "hf/arch/barriers.h"
#include "hf/arch/init.h"
#include "hf/arch/mmu.h"
#include "hf/arch/plat/smc.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/panic.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

#include "debug_el1.h"
#include "feature_id.h"
#include "msr.h"
#include "perfmon.h"
#include "psci.h"
#include "psci_handler.h"
#include "smc.h"
#include "sysregs.h"

/**
 * Hypervisor Fault Address Register Non-Secure.
 */
#define HPFAR_EL2_NS (UINT64_C(0x1) << 63)

/**
 * Hypervisor Fault Address Register Faulting IPA.
 */
#define HPFAR_EL2_FIPA (UINT64_C(0xFFFFFFFFFF0))
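
/*
 * Worked example, for illustration: HPFAR_EL2.FIPA (bits [43:4]) holds
 * bits [51:12] of the faulting IPA, so the faulting page is recovered as
 * (hpfar_el2 & HPFAR_EL2_FIPA) << 8. A hpfar_el2 value of 0x1230 thus
 * corresponds to the IPA page at 0x123000; see fault_info_init() below.
 */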

/**
 * Gets the value to increment for the next PC.
 * The ESR encodes whether the instruction is 2 bytes or 4 bytes long.
 */
#define GET_NEXT_PC_INC(esr) (GET_ESR_IL(esr) ? 4 : 2)
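
/*
 * For illustration: a trapped A64 instruction such as WFI reports
 * ESR_ELx.IL == 1 and the PC advances by 4, while a trapped 16-bit T32
 * instruction reports IL == 0 and advances by 2.
 */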

/**
 * The Client ID field within X7 for an SMC64 call.
 */
#define CLIENT_ID_MASK UINT64_C(0xffff)
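
/*
 * Per SMCCC, the client ID occupies the low 16 bits of W7; smc_forwarder()
 * below ORs the calling VM's ID into that field while preserving whatever
 * the caller put in the upper bits (e.g. a Secure OS ID).
 */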

/**
 * Returns a reference to the currently executing vCPU.
 */
static struct vcpu *current(void)
{
	return (struct vcpu *)read_msr(tpidr_el2);
}

/**
 * Saves the state of per-vCPU peripherals, such as the virtual timer, and
 * informs the arch-independent sections that registers have been saved.
 */
void complete_saving_state(struct vcpu *vcpu)
{
	vcpu->regs.peripherals.cntv_cval_el0 = read_msr(cntv_cval_el0);
	vcpu->regs.peripherals.cntv_ctl_el0 = read_msr(cntv_ctl_el0);

	api_regs_state_saved(vcpu);

	/*
	 * If switching away from the primary, copy the current EL0 virtual
	 * timer registers to the corresponding EL2 physical timer registers.
	 * This is used to emulate the virtual timer for the primary in case it
	 * should fire while the secondary is running.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * Clear timer control register before copying compare value, to
		 * avoid a spurious timer interrupt. This could be a problem if
		 * the interrupt is configured as edge-triggered, as it would
		 * then be latched in.
		 */
		write_msr(cnthp_ctl_el2, 0);
		write_msr(cnthp_cval_el2, read_msr(cntv_cval_el0));
		write_msr(cnthp_ctl_el2, read_msr(cntv_ctl_el0));
	}
}

/**
 * Restores the state of per-vCPU peripherals, such as the virtual timer.
 */
void begin_restoring_state(struct vcpu *vcpu)
{
	/*
	 * Clear timer control register before restoring compare value, to avoid
	 * a spurious timer interrupt. This could be a problem if the interrupt
	 * is configured as edge-triggered, as it would then be latched in.
	 */
	write_msr(cntv_ctl_el0, 0);
	write_msr(cntv_cval_el0, vcpu->regs.peripherals.cntv_cval_el0);
	write_msr(cntv_ctl_el0, vcpu->regs.peripherals.cntv_ctl_el0);

	/*
	 * If we are switching (back) to the primary, disable the EL2 physical
	 * timer which was being used to emulate the EL0 virtual timer, as the
	 * virtual timer is now running for the primary again.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		write_msr(cnthp_ctl_el2, 0);
		write_msr(cnthp_cval_el2, 0);
	}
}

/**
 * Invalidates all stage 1 TLB entries on the current (physical) CPU for the
 * current VMID.
 */
static void invalidate_vm_tlb(void)
{
	/*
	 * Ensure that the last VTTBR write has taken effect so we invalidate
	 * the right set of TLB entries.
	 */
	isb();

	__asm__ volatile("tlbi vmalle1");

	/*
	 * Ensure that no instructions are fetched for the VM until after the
	 * TLB invalidation has taken effect.
	 */
	isb();

	/*
	 * Ensure that no data reads or writes for the VM happen until after the
	 * TLB invalidation has taken effect. Non-shareable is enough because
	 * the TLB is local to the CPU.
	 */
	dsb(nsh);
}

/**
 * Invalidates the TLB if a different vCPU is being run than the last vCPU of
 * the same VM which was run on the current pCPU.
 *
 * This is necessary because VMs may (contrary to the architecture
 * specification) use inconsistent ASIDs across vCPUs. Cf. KVM's similar
 * workaround:
 * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94d0e5980d6791b9
 */
void maybe_invalidate_tlb(struct vcpu *vcpu)
{
	size_t current_cpu_index = cpu_index(vcpu->cpu);
	ffa_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);

	if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
	    new_vcpu_index) {
		/*
		 * The vCPU has changed since the last time this VM was run on
		 * this pCPU, so we need to invalidate the TLB.
		 */
		invalidate_vm_tlb();

		/* Record the fact that this vCPU is now running on this CPU. */
		vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] =
			new_vcpu_index;
	}
}
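
/*
 * Concrete case of the hazard handled above: if vCPU 0 of a VM ran here
 * earlier and vCPU 1 of the same VM is now scheduled onto this pCPU with
 * the same ASID mapping a different address space, stale stage 1 entries
 * cached for vCPU 0 could otherwise be hit.
 */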

noreturn void irq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("IRQ from current exception level.");
}

noreturn void fiq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("FIQ from current exception level.");
}

noreturn void serr_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("SError from current exception level.");
}

noreturn void sync_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	uintreg_t esr = read_msr(esr_el2);
	uintreg_t ec = GET_ESR_EC(esr);

	(void)spsr;

	switch (ec) {
	case EC_DATA_ABORT_SAME_EL:
		if (!(esr & (1U << 10))) { /* Check FnV bit. */
			dlog_error(
				"Data abort: pc=%#x, esr=%#x, ec=%#x, "
				"far=%#x\n",
				elr, esr, ec, read_msr(far_el2));
		} else {
			dlog_error(
				"Data abort: pc=%#x, esr=%#x, ec=%#x, "
				"far=invalid\n",
				elr, esr, ec);
		}

		break;

	default:
		dlog_error(
			"Unknown current sync exception pc=%#x, esr=%#x, "
			"ec=%#x\n",
			elr, esr, ec);
		break;
	}

	panic("EL2 exception");
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_irq(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->lazy.hcr_el2 |= HCR_EL2_VI;
	} else {
		r->lazy.hcr_el2 &= ~HCR_EL2_VI;
	}
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register.
 */
static void set_virtual_irq_current(bool enable)
{
	uintreg_t hcr_el2 = read_msr(hcr_el2);

	if (enable) {
		hcr_el2 |= HCR_EL2_VI;
	} else {
		hcr_el2 &= ~HCR_EL2_VI;
	}
	write_msr(hcr_el2, hcr_el2);
}

/**
 * Sets or clears the VF bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_fiq(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->lazy.hcr_el2 |= HCR_EL2_VF;
	} else {
		r->lazy.hcr_el2 &= ~HCR_EL2_VF;
	}
}

/**
 * Sets or clears the VF bit in the HCR_EL2 register.
 */
static void set_virtual_fiq_current(bool enable)
{
	uintreg_t hcr_el2 = read_msr(hcr_el2);

	if (enable) {
		hcr_el2 |= HCR_EL2_VF;
	} else {
		hcr_el2 &= ~HCR_EL2_VF;
	}
	write_msr(hcr_el2, hcr_el2);
}

#if SECURE_WORLD == 1

static bool sp_boot_next(struct vcpu *current, struct vcpu **next,
			 struct ffa_value *ffa_ret)
{
	struct vm_locked current_vm_locked;
	struct vm *vm_next = NULL;
	bool ret = false;

	/*
	 * If the VM hasn't been initialized, initialize it and traverse the
	 * boot list following the "next_boot" field in the VM structure.
	 * Once all the SPs have been booted (when "next_boot" is NULL),
	 * return execution to the NWd.
	 */
	current_vm_locked = vm_lock(current->vm);
	if (current_vm_locked.vm->initialized == false) {
		current_vm_locked.vm->initialized = true;
		dlog_verbose("Initialized VM: %#x, boot_order: %u\n",
			     current_vm_locked.vm->id,
			     current_vm_locked.vm->boot_order);

		if (current_vm_locked.vm->next_boot != NULL) {
			current->state = VCPU_STATE_BLOCKED_MAILBOX;
			vm_next = current_vm_locked.vm->next_boot;
			CHECK(vm_next->initialized == false);
			*next = vm_get_vcpu(vm_next, vcpu_index(current));
			arch_regs_reset(*next);
			(*next)->cpu = current->cpu;
			(*next)->state = VCPU_STATE_RUNNING;
			(*next)->regs_available = false;

			*ffa_ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
			ret = true;
			goto out;
		}

		dlog_verbose("Finished initializing all VMs.\n");
	}

out:
	vm_unlock(&current_vm_locked);
	return ret;
}

/**
 * Handles special direct messages from the SPMD to the SPMC. For now these
 * relate to power management only.
 */
static bool spmd_handler(struct ffa_value *args, struct vcpu *current)
{
	ffa_vm_id_t sender = ffa_msg_send_sender(*args);
	ffa_vm_id_t receiver = ffa_msg_send_receiver(*args);
	ffa_vm_id_t current_vm_id = current->vm->id;

	/*
	 * Check if direct message request is originating from the SPMD and
	 * directed to the SPMC.
	 */
	if (!(sender == HF_SPMD_VM_ID && receiver == HF_SPMC_VM_ID &&
	      current_vm_id == HF_OTHER_WORLD_ID)) {
		return false;
	}

	switch (args->arg3) {
	case PSCI_CPU_OFF: {
		struct vm *vm = vm_get_first_boot();
		struct vcpu *vcpu = vm_get_vcpu(vm, vcpu_index(current));

		/*
		 * TODO: the PM event reached the SPMC. In a later iteration,
		 * the PM event can be passed to the SP by resuming it.
		 */
		*args = (struct ffa_value){
			.func = FFA_MSG_SEND_DIRECT_RESP_32,
			.arg1 = ((uint64_t)HF_SPMC_VM_ID << 16) | HF_SPMD_VM_ID,
			.arg2 = 0U};

		dlog_verbose("%s cpu off notification cpuid %#x\n", __func__,
			     vcpu->cpu->id);
		cpu_off(vcpu->cpu);
		break;
	}
	default:
		dlog_verbose("%s message not handled %#x\n", __func__,
			     args->arg3);
		return false;
	}

	return true;
}
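
/*
 * Framing note: in the FFA_MSG_SEND_DIRECT_* ABIs the sender ID sits in
 * bits [31:16] of W1 and the receiver ID in bits [15:0], which is why the
 * response above packs (HF_SPMC_VM_ID << 16) | HF_SPMD_VM_ID into arg1.
 */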

#endif

/**
 * Checks whether to block an SMC being forwarded from a VM.
 */
static bool smc_is_blocked(const struct vm *vm, uint32_t func)
{
	bool block_by_default = !vm->smc_whitelist.permissive;

	for (size_t i = 0; i < vm->smc_whitelist.smc_count; ++i) {
		if (func == vm->smc_whitelist.smcs[i]) {
			return false;
		}
	}

	dlog_notice("SMC %#010x attempted from VM %#x, blocked=%u\n", func,
		    vm->id, block_by_default);

	/* Access is still allowed in permissive mode. */
	return block_by_default;
}

/**
 * Applies SMC access control according to the manifest and forwards the call
 * if access is granted.
 */
static void smc_forwarder(const struct vm *vm, struct ffa_value *args)
{
	struct ffa_value ret;
	uint32_t client_id = vm->id;
	uintreg_t arg7 = args->arg7;

	if (smc_is_blocked(vm, args->func)) {
		args->func = SMCCC_ERROR_UNKNOWN;
		return;
	}

	/*
	 * Set the Client ID but keep the existing Secure OS ID and anything
	 * else (currently unspecified) that the client may have passed in the
	 * upper bits.
	 */
	args->arg7 = client_id | (arg7 & ~CLIENT_ID_MASK);
	ret = smc_forward(args->func, args->arg1, args->arg2, args->arg3,
			  args->arg4, args->arg5, args->arg6, args->arg7);

	/*
	 * Preserve the value passed by the caller, rather than the generated
	 * client_id. Note that this would also overwrite any return value that
	 * may be in x7, but the SMCs that we are forwarding are legacy calls
	 * from before SMCCC 1.2 so won't have more than 4 return values anyway.
	 */
	ret.arg7 = arg7;

	plat_smc_post_forward(*args, &ret);

	*args = ret;
}

/**
 * In the normal world, ffa_handler is always called from the virtual FF-A
 * instance (from a VM in EL1). In the secure world, ffa_handler may be called
 * from the virtual (a secure partition in S-EL1) or physical FF-A instance
 * (from the normal world via EL3). The function returns true when the call is
 * handled. The *next pointer is updated to the next vCPU to run, which might be
 * the 'other world' vCPU if the call originated from the virtual FF-A instance
 * and has to be forwarded down to EL3, or left as is to resume the current
 * vCPU.
 */
static bool ffa_handler(struct ffa_value *args, struct vcpu *current,
			struct vcpu **next)
{
	uint32_t func = args->func;

	/*
	 * NOTE: When adding new methods to this handler update
	 * api_ffa_features accordingly.
	 */
	switch (func) {
	case FFA_VERSION_32:
		*args = api_ffa_version(args->arg1);
		return true;
	case FFA_PARTITION_INFO_GET_32: {
		struct ffa_uuid uuid;

		ffa_uuid_init(args->arg1, args->arg2, args->arg3, args->arg4,
			      &uuid);
		*args = api_ffa_partition_info_get(current, &uuid);
		return true;
	}
	case FFA_ID_GET_32:
		*args = api_ffa_id_get(current);
		return true;
	case FFA_FEATURES_32:
		*args = api_ffa_features(args->arg1);
		return true;
	case FFA_RX_RELEASE_32:
		*args = api_ffa_rx_release(current, next);
		return true;
	case FFA_RXTX_MAP_64:
		*args = api_ffa_rxtx_map(ipa_init(args->arg1),
					 ipa_init(args->arg2), args->arg3,
					 current, next);
		return true;
	case FFA_YIELD_32:
		*args = api_yield(current, next);
		return true;
	case FFA_MSG_SEND_32:
		*args = api_ffa_msg_send(
			ffa_msg_send_sender(*args),
			ffa_msg_send_receiver(*args), ffa_msg_send_size(*args),
			ffa_msg_send_attributes(*args), current, next);
		return true;
	case FFA_MSG_WAIT_32:
#if SECURE_WORLD == 1
		if (sp_boot_next(current, next, args)) {
			return true;
		}
#endif
		*args = api_ffa_msg_recv(true, current, next);
		return true;
	case FFA_MSG_POLL_32:
		*args = api_ffa_msg_recv(false, current, next);
		return true;
	case FFA_RUN_32:
		*args = api_ffa_run(ffa_vm_id(*args), ffa_vcpu_index(*args),
				    current, next);
		return true;
	case FFA_MEM_DONATE_32:
	case FFA_MEM_LEND_32:
	case FFA_MEM_SHARE_32:
		*args = api_ffa_mem_send(func, args->arg1, args->arg2,
					 ipa_init(args->arg3), args->arg4,
					 current);
		return true;
	case FFA_MEM_RETRIEVE_REQ_32:
		*args = api_ffa_mem_retrieve_req(args->arg1, args->arg2,
						 ipa_init(args->arg3),
						 args->arg4, current);
		return true;
	case FFA_MEM_RELINQUISH_32:
		*args = api_ffa_mem_relinquish(current);
		return true;
	case FFA_MEM_RECLAIM_32:
		*args = api_ffa_mem_reclaim(
			ffa_assemble_handle(args->arg1, args->arg2), args->arg3,
			current);
		return true;
	case FFA_MEM_FRAG_RX_32:
		*args = api_ffa_mem_frag_rx(ffa_frag_handle(*args), args->arg3,
					    (args->arg4 >> 16) & 0xffff,
					    current);
		return true;
	case FFA_MEM_FRAG_TX_32:
		*args = api_ffa_mem_frag_tx(ffa_frag_handle(*args), args->arg3,
					    (args->arg4 >> 16) & 0xffff,
					    current);
		return true;
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32: {
#if SECURE_WORLD == 1
		if (spmd_handler(args, current)) {
			return true;
		}
#endif
		*args = api_ffa_msg_send_direct_req(
			ffa_msg_send_sender(*args),
			ffa_msg_send_receiver(*args), *args, current, next);
		return true;
	}
	case FFA_MSG_SEND_DIRECT_RESP_64:
	case FFA_MSG_SEND_DIRECT_RESP_32:
		*args = api_ffa_msg_send_direct_resp(
			ffa_msg_send_sender(*args),
			ffa_msg_send_receiver(*args), *args, current, next);
		return true;
	case FFA_SECONDARY_EP_REGISTER_64:
		*args = api_ffa_secondary_ep_register(ipa_init(args->arg1),
						      current);
		return true;
	}

	return false;
}

/**
 * Sets or clears the VI/VF bits according to pending interrupts.
 */
static void vcpu_update_virtual_interrupts(struct vcpu *next)
{
	struct vcpu_locked vcpu_locked;

	if (next == NULL) {
		/*
		 * Not switching vCPUs, set the bit for the current vCPU
		 * directly in the register.
		 */

		vcpu_locked = vcpu_lock(current());
		set_virtual_irq_current(
			vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
		set_virtual_fiq_current(
			vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
		vcpu_unlock(&vcpu_locked);
	} else if (vm_id_is_current_world(next->vm->id)) {
		/*
		 * About to switch vCPUs, set the bit for the vCPU to which we
		 * are switching in the saved copy of the register.
		 */

		vcpu_locked = vcpu_lock(next);
		set_virtual_irq(&next->regs,
				vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
		set_virtual_fiq(&next->regs,
				vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
		vcpu_unlock(&vcpu_locked);
	}
}

/**
 * Handles PSCI and FF-A calls and writes the return value back to the
 * registers of the vCPU. This is shared between smc_handler and hvc_handler.
 *
 * Returns true if the call was handled.
 */
static bool hvc_smc_handler(struct ffa_value args, struct vcpu *vcpu,
			    struct vcpu **next)
{
	/* Do not expect PSCI calls emitted from within the secure world. */
#if SECURE_WORLD == 0
	if (psci_handler(vcpu, args.func, args.arg1, args.arg2, args.arg3,
			 &vcpu->regs.r[0], next)) {
		return true;
	}
#endif

	if (ffa_handler(&args, vcpu, next)) {
		arch_regs_set_retval(&vcpu->regs, args);
		vcpu_update_virtual_interrupts(*next);
		return true;
	}

	return false;
}

/**
 * Processes SMC instruction calls.
 */
static struct vcpu *smc_handler(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	switch (args.func & ~SMCCC_CONVENTION_MASK) {
	case HF_DEBUG_LOG:
		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
		return NULL;
	}

	smc_forwarder(vcpu->vm, &args);
	arch_regs_set_retval(&vcpu->regs, args);
	return NULL;
}

#if SECURE_WORLD == 1

/**
 * Called from other_world_loop on return from SMC.
 * Processes SMC calls originating from the NWd.
 */
struct vcpu *smc_handler_from_nwd(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	/*
	 * If the SMC emitted by the normal world is not handled in the secure
	 * world then return an error stating that the ABI is not supported.
	 * Only FF-A calls are supported. We cannot return SMCCC_ERROR_UNKNOWN
	 * directly because the SPMD SMC handler would not recognize it as a
	 * standard FF-A call returning from the SPMC.
	 */
	arch_regs_set_retval(&vcpu->regs, ffa_error(FFA_NOT_SUPPORTED));

	return NULL;
}

#endif

/*
 * Exception vector offsets.
 * See Arm Architecture Reference Manual Armv8-A, D1.10.2.
 */

/**
 * Offset for synchronous exceptions at current EL with SPx.
 */
#define OFFSET_CURRENT_SPX UINT64_C(0x200)

/**
 * Offset for synchronous exceptions at lower EL using AArch64.
 */
#define OFFSET_LOWER_EL_64 UINT64_C(0x400)

/**
 * Offset for synchronous exceptions at lower EL using AArch32.
 */
#define OFFSET_LOWER_EL_32 UINT64_C(0x600)

/**
 * Returns the address for the exception handler at EL1.
 */
static uintreg_t get_el1_exception_handler_addr(const struct vcpu *vcpu)
{
	uintreg_t base_addr = read_msr(vbar_el1);
	uintreg_t pe_mode = vcpu->regs.spsr & PSR_PE_MODE_MASK;
	bool is_arch32 = vcpu->regs.spsr & PSR_ARCH_MODE_32;

	if (pe_mode == PSR_PE_MODE_EL0T) {
		if (is_arch32) {
			base_addr += OFFSET_LOWER_EL_32;
		} else {
			base_addr += OFFSET_LOWER_EL_64;
		}
	} else {
		CHECK(!is_arch32);
		base_addr += OFFSET_CURRENT_SPX;
	}

	return base_addr;
}
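
/*
 * For example, an exception injected while the vCPU was executing 64-bit
 * code at EL0 vectors to vbar_el1 + 0x400, while one injected while it was
 * already at EL1 (using SP_EL1) vectors to vbar_el1 + 0x200.
 */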

/**
 * Injects an exception with the specified Exception Syndrome Register value
 * into the EL1.
 *
 * NOTE: This function assumes that the lazy registers haven't been saved, and
 * writes to the lazy registers of the CPU directly instead of the vCPU.
 */
static void inject_el1_exception(struct vcpu *vcpu, uintreg_t esr_el1_value,
				 uintreg_t far_el1_value)
{
	uintreg_t handler_address = get_el1_exception_handler_addr(vcpu);

	/* Update the CPU state to inject the exception. */
	write_msr(esr_el1, esr_el1_value);
	write_msr(far_el1, far_el1_value);
	write_msr(elr_el1, vcpu->regs.pc);
	write_msr(spsr_el1, vcpu->regs.spsr);

	/*
	 * Mask (disable) interrupts and run in EL1h mode.
	 * EL1h mode is used because by default, taking an exception selects the
	 * stack pointer for the target Exception level. The software can change
	 * that later in the handler if needed.
	 */
	vcpu->regs.spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;

	/* Transfer control to the exception handler. */
	vcpu->regs.pc = handler_address;
}

/**
 * Injects a Data Abort exception (same exception level).
 */
static void inject_el1_data_abort_exception(struct vcpu *vcpu,
					    uintreg_t esr_el2,
					    uintreg_t far_el2)
{
	/*
	 * ISS encoding remains the same, but the EC is changed to reflect
	 * where the exception came from.
	 * See Arm Architecture Reference Manual Armv8-A, pages D13-2943/2982.
	 */
	uintreg_t esr_el1_value = GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) |
				  (EC_DATA_ABORT_SAME_EL << ESR_EC_OFFSET);

	dlog_notice("Injecting Data Abort exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el2);
}

/**
 * Injects an Instruction Abort exception (same exception level).
 */
static void inject_el1_instruction_abort_exception(struct vcpu *vcpu,
						   uintreg_t esr_el2,
						   uintreg_t far_el2)
{
	/*
	 * ISS encoding remains the same, but the EC is changed to reflect
	 * where the exception came from.
	 * See Arm Architecture Reference Manual Armv8-A, pages D13-2941/2980.
	 */
	uintreg_t esr_el1_value =
		GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) |
		(EC_INSTRUCTION_ABORT_SAME_EL << ESR_EC_OFFSET);

	dlog_notice("Injecting Instruction Abort exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el2);
}

/**
 * Injects an exception with an unknown reason into the EL1.
 */
static void inject_el1_unknown_exception(struct vcpu *vcpu, uintreg_t esr_el2)
{
	uintreg_t esr_el1_value =
		GET_ESR_IL(esr_el2) | (EC_UNKNOWN << ESR_EC_OFFSET);

	/*
	 * The value of the far_el2 register is UNKNOWN in this case, so don't
	 * propagate it, to avoid leaking sensitive information.
	 */
	uintreg_t far_el1_value = 0;
	char *direction_str;

	direction_str = ISS_IS_READ(esr_el2) ? "read" : "write";
	dlog_notice(
		"Trapped access to system register %s: op0=%d, op1=%d, crn=%d, "
		"crm=%d, op2=%d, rt=%d.\n",
		direction_str, GET_ISS_OP0(esr_el2), GET_ISS_OP1(esr_el2),
		GET_ISS_CRN(esr_el2), GET_ISS_CRM(esr_el2),
		GET_ISS_OP2(esr_el2), GET_ISS_RT(esr_el2));

	dlog_notice("Injecting Unknown Reason exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el1_value);
}

static struct vcpu *hvc_handler(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	switch (args.func) {
	case HF_MAILBOX_WRITABLE_GET:
		vcpu->regs.r[0] = api_mailbox_writable_get(vcpu);
		break;

	case HF_MAILBOX_WAITER_GET:
		vcpu->regs.r[0] = api_mailbox_waiter_get(args.arg1, vcpu);
		break;

	case HF_INTERRUPT_ENABLE:
		vcpu->regs.r[0] = api_interrupt_enable(args.arg1, args.arg2,
						       args.arg3, vcpu);
		break;

	case HF_INTERRUPT_GET:
		vcpu->regs.r[0] = api_interrupt_get(vcpu);
		break;

	case HF_INTERRUPT_INJECT:
		vcpu->regs.r[0] = api_interrupt_inject(args.arg1, args.arg2,
						       args.arg3, vcpu, &next);
		break;

	case HF_DEBUG_LOG:
		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
		break;

	default:
		vcpu->regs.r[0] = SMCCC_ERROR_UNKNOWN;
	}

	vcpu_update_virtual_interrupts(next);

	return next;
}

struct vcpu *irq_lower(void)
{
	/*
	 * Switch back to primary VM, interrupts will be handled there.
	 *
	 * If the VM has aborted, this vCPU will be aborted when the scheduler
	 * tries to run it again. This means the interrupt will not be delayed
	 * by the aborted VM.
	 *
	 * TODO: Only switch when the interrupt isn't for the current VM.
	 */
	return api_preempt(current());
}

struct vcpu *fiq_lower(void)
{
	return irq_lower();
}

noreturn struct vcpu *serr_lower(void)
{
	/*
	 * SError exceptions should be isolated and handled by the responsible
	 * VM/exception level. Getting here indicates a bug, that isolation is
	 * not working, or a processor that does not support ARMv8.2-IESB, in
	 * which case Hafnium routes SError exceptions to EL2 (here).
	 */
	panic("SError from a lower exception level.");
}

/**
 * Initialises a fault info structure. It assumes that an FnV bit exists at
 * bit offset 10 of the ESR, and that it is only valid when the bottom 6 bits of
 * the ESR (the fault status code) are 010000; this is the case for both
 * instruction and data aborts, but not necessarily for other exception reasons.
 */
static struct vcpu_fault_info fault_info_init(uintreg_t esr,
					      const struct vcpu *vcpu,
					      uint32_t mode)
{
	uint32_t fsc = esr & 0x3f;
	struct vcpu_fault_info r;
	uint64_t hpfar_el2_val;
	uint64_t hpfar_el2_fipa;

	r.mode = mode;
	r.pc = va_init(vcpu->regs.pc);

	/* Get Hypervisor IPA Fault Address value. */
	hpfar_el2_val = read_msr(hpfar_el2);

	/* Extract Faulting IPA. */
	hpfar_el2_fipa = (hpfar_el2_val & HPFAR_EL2_FIPA) << 8;

#if SECURE_WORLD == 1

	/*
	 * Determine if the faulting IPA targets NS space.
	 * At NS-EL2, hpfar_el2 bit 63 is RES0. At S-EL2, this bit determines if
	 * the faulting Stage-1 address output is a secure or non-secure IPA.
	 */
	if ((hpfar_el2_val & HPFAR_EL2_NS) != 0) {
		r.mode |= MM_MODE_NS;
	}

#endif

	/*
	 * Check the FnV bit, which is only valid if dfsc/ifsc is 010000. It
	 * indicates that we cannot rely on far_el2.
	 */
	if (fsc == 0x10 && esr & (1U << 10)) {
		r.vaddr = va_init(0);
		r.ipaddr = ipa_init(hpfar_el2_fipa);
	} else {
		r.vaddr = va_init(read_msr(far_el2));
		r.ipaddr = ipa_init(hpfar_el2_fipa |
				    (read_msr(far_el2) & (PAGE_SIZE - 1)));
	}

	return r;
}
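
/*
 * Illustration of the two paths above: stage 2 permission and translation
 * faults always report a valid far_el2, so the IPA combines the page from
 * hpfar_el2 with the page offset from far_el2. A synchronous external
 * abort (fsc == 0b010000) may set FnV instead, in which case only the
 * page-aligned IPA from hpfar_el2 is trustworthy and the virtual address
 * is reported as 0.
 */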

struct vcpu *sync_lower_exception(uintreg_t esr, uintreg_t far)
{
	struct vcpu *vcpu = current();
	struct vcpu_fault_info info;
	struct vcpu *new_vcpu;
	uintreg_t ec = GET_ESR_EC(esr);

	switch (ec) {
	case EC_WFI_WFE:
		/* Skip the instruction. */
		vcpu->regs.pc += GET_NEXT_PC_INC(esr);
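		/*
		 * The increment is 2 or 4 bytes, per the ESR's IL bit (a
		 * 16-bit or 32-bit trapped instruction).
		 */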
		/* Check TI bit of ISS, 0 = WFI, 1 = WFE. */
		if (esr & 1) {
			/* WFE */
			/*
			 * TODO: consider giving the scheduler more context,
			 * somehow.
			 */
			api_yield(vcpu, &new_vcpu);
			return new_vcpu;
		}
		/* WFI */
		return api_wait_for_interrupt(vcpu);

	case EC_DATA_ABORT_LOWER_EL:
		info = fault_info_init(
			esr, vcpu, (esr & (1U << 6)) ? MM_MODE_W : MM_MODE_R);
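		/* ISS bit 6 above is WnR: set for writes, clear for reads. */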
		if (vcpu_handle_page_fault(vcpu, &info)) {
			return NULL;
		}
		/* Inform the EL1 of the data abort. */
		inject_el1_data_abort_exception(vcpu, esr, far);

		/* Schedule the same VM to continue running. */
		return NULL;

	case EC_INSTRUCTION_ABORT_LOWER_EL:
		info = fault_info_init(esr, vcpu, MM_MODE_X);
		if (vcpu_handle_page_fault(vcpu, &info)) {
			return NULL;
		}
		/* Inform the EL1 of the instruction abort. */
		inject_el1_instruction_abort_exception(vcpu, esr, far);

		/* Schedule the same VM to continue running. */
		return NULL;

	case EC_HVC:
		return hvc_handler(vcpu);

	case EC_SMC: {
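		/*
		 * Capture the PC before running the handler, since
		 * smc_handler() may update the vCPU's registers.
		 */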
		uintreg_t smc_pc = vcpu->regs.pc;
		struct vcpu *next = smc_handler(vcpu);

		/* Skip the SMC instruction. */
		vcpu->regs.pc = smc_pc + GET_NEXT_PC_INC(esr);

		return next;
	}

	case EC_MSR:
		/*
		 * NOTE: This should never be reached because it goes through
		 * a separate path handled by handle_system_register_access().
		 */
		panic("Handled by handle_system_register_access().");

	default:
		dlog_notice(
			"Unknown lower sync exception pc=%#x, esr=%#x, "
			"ec=%#x\n",
			vcpu->regs.pc, esr, ec);
		break;
	}

	/*
	 * The exception wasn't handled. Inject it into the VM to give it a
	 * chance to handle it as an unknown exception.
	 */
	inject_el1_unknown_exception(vcpu, esr);

	/* Schedule the same VM to continue running. */
	return NULL;
}

/**
 * Handles EC = 011000, MSR, MRS instruction traps.
 * Emulates the access when it is allowed and supported; otherwise injects an
 * unknown exception into the vCPU. The same vCPU continues running either
 * way.
 */
void handle_system_register_access(uintreg_t esr_el2)
{
	struct vcpu *vcpu = current();
	ffa_vm_id_t vm_id = vcpu->vm->id;
	uintreg_t ec = GET_ESR_EC(esr_el2);

	CHECK(ec == EC_MSR);
	/*
	 * Handle accesses to debug, performance monitor and feature ID
	 * registers. Inject an exception for unhandled/unsupported registers.
	 */
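	/*
	 * For example (illustrative, not exhaustive): MDSCR_EL1 accesses
	 * would be routed to the debug handler, PMCR_EL0 to the performance
	 * monitor handler and ID_AA64PFR0_EL1 to the feature ID handler.
	 */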
	if (debug_el1_is_register_access(esr_el2)) {
		if (!debug_el1_process_access(vcpu, vm_id, esr_el2)) {
			inject_el1_unknown_exception(vcpu, esr_el2);
			return;
		}
	} else if (perfmon_is_register_access(esr_el2)) {
		if (!perfmon_process_access(vcpu, vm_id, esr_el2)) {
			inject_el1_unknown_exception(vcpu, esr_el2);
			return;
		}
	} else if (feature_id_is_register_access(esr_el2)) {
		if (!feature_id_process_access(vcpu, esr_el2)) {
			inject_el1_unknown_exception(vcpu, esr_el2);
			return;
		}
	} else {
		inject_el1_unknown_exception(vcpu, esr_el2);
		return;
	}

	/* Instruction was fulfilled. Skip it and run the next one. */
	vcpu->regs.pc += GET_NEXT_PC_INC(esr_el2);
}