/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <stdnoreturn.h>

#include "hf/arch/barriers.h"
#include "hf/arch/init.h"
#include "hf/arch/mmu.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/plat/smc.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/panic.h"
#include "hf/plat/interrupts.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

#include "debug_el1.h"
#include "feature_id.h"
#include "msr.h"
#include "perfmon.h"
#include "psci.h"
#include "psci_handler.h"
#include "smc.h"
#include "sysregs.h"

/**
 * Hypervisor Fault Address Register Non-Secure.
 */
#define HPFAR_EL2_NS (UINT64_C(0x1) << 63)

/**
 * Hypervisor Fault Address Register Faulting IPA.
 */
#define HPFAR_EL2_FIPA (UINT64_C(0xFFFFFFFFFF0))
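
/*
 * Illustrative note: FIPA occupies bits [43:4] and holds bits [51:12] of the
 * faulting IPA, so a fault handler can recover the address with, e.g.:
 *
 *   uintreg_t ipa = (read_msr(hpfar_el2) & HPFAR_EL2_FIPA) << 8;
 */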

/**
 * Gets the value to increment for the next PC.
 * The ESR encodes whether the instruction is 2 bytes or 4 bytes long.
 */
#define GET_NEXT_PC_INC(esr) (GET_ESR_IL(esr) ? 4 : 2)
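
/*
 * A trap handler that emulates an instruction would typically skip it with
 * (a sketch, assuming esr_el2 has been read into `esr`):
 *
 *   vcpu->regs.pc += GET_NEXT_PC_INC(esr);
 */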

/**
 * The Client ID field within X7 for an SMC64 call.
 */
#define CLIENT_ID_MASK UINT64_C(0xffff)
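
/*
 * smc_forwarder() below uses this mask to splice the caller's ID into the
 * low 16 bits of x7 while preserving the rest of the register:
 *
 *   args->arg7 = client_id | (arg7 & ~CLIENT_ID_MASK);
 */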

/*
 * Target function IDs for framework messages from the SPMD.
 */
#define SPMD_FWK_MSG_BIT (UINT64_C(1) << 31)
#define SPMD_FWK_MSG_FUNC_MASK UINT64_C(0xFF)
#define SPMD_FWK_MSG_PSCI_REQ UINT8_C(0x0)
#define SPMD_FWK_MSG_PSCI_RESP UINT8_C(0x2)
#define SPMD_FWK_MSG_FFA_VERSION_REQ UINT8_C(0x8)
#define SPMD_FWK_MSG_FFA_VERSION_RESP UINT8_C(0x9)
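
/*
 * For example, a PSCI power management request conveyed by the SPMD arrives
 * as a direct message request whose w2 is (SPMD_FWK_MSG_BIT |
 * SPMD_FWK_MSG_PSCI_REQ); spmd_handler() below answers it with a direct
 * response carrying (SPMD_FWK_MSG_BIT | SPMD_FWK_MSG_PSCI_RESP).
 */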

/**
 * Returns a reference to the currently executing vCPU.
 */
static struct vcpu *current(void)
{
	// NOLINTNEXTLINE(performance-no-int-to-ptr)
	return (struct vcpu *)read_msr(tpidr_el2);
}
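
/*
 * This relies on the convention (assumed here) that tpidr_el2 holds the
 * pointer to the running vCPU's state, loaded when the vCPU is switched to,
 * so exception handlers can recover it without touching guest registers.
 */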

/**
 * Saves the state of per-vCPU peripherals, such as the virtual timer, and
 * informs the arch-independent sections that registers have been saved.
 */
void complete_saving_state(struct vcpu *vcpu)
{
	if (has_vhe_support()) {
		vcpu->regs.peripherals.cntv_cval_el0 =
			read_msr(MSR_CNTV_CVAL_EL02);
		vcpu->regs.peripherals.cntv_ctl_el0 =
			read_msr(MSR_CNTV_CTL_EL02);
	} else {
		vcpu->regs.peripherals.cntv_cval_el0 = read_msr(cntv_cval_el0);
		vcpu->regs.peripherals.cntv_ctl_el0 = read_msr(cntv_ctl_el0);
	}

	api_regs_state_saved(vcpu);

	/*
	 * If switching away from the primary, copy the current EL0 virtual
	 * timer registers to the corresponding EL2 physical timer registers.
	 * This is used to emulate the virtual timer for the primary in case it
	 * should fire while the secondary is running.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * Clear timer control register before copying compare value,
		 * to avoid a spurious timer interrupt. This could be a problem
		 * if the interrupt is configured as edge-triggered, as it
		 * would then be latched in.
		 */
		write_msr(cnthp_ctl_el2, 0);

		if (has_vhe_support()) {
			write_msr(cnthp_cval_el2, read_msr(MSR_CNTV_CVAL_EL02));
			write_msr(cnthp_ctl_el2, read_msr(MSR_CNTV_CTL_EL02));
		} else {
			write_msr(cnthp_cval_el2, read_msr(cntv_cval_el0));
			write_msr(cnthp_ctl_el2, read_msr(cntv_ctl_el0));
		}
	}
}

/**
 * Restores the state of per-vCPU peripherals, such as the virtual timer.
 */
void begin_restoring_state(struct vcpu *vcpu)
{
	/*
	 * Clear timer control register before restoring compare value, to
	 * avoid a spurious timer interrupt. This could be a problem if the
	 * interrupt is configured as edge-triggered, as it would then be
	 * latched in.
	 */
	if (has_vhe_support()) {
		write_msr(MSR_CNTV_CTL_EL02, 0);
		write_msr(MSR_CNTV_CVAL_EL02,
			  vcpu->regs.peripherals.cntv_cval_el0);
		write_msr(MSR_CNTV_CTL_EL02,
			  vcpu->regs.peripherals.cntv_ctl_el0);
	} else {
		write_msr(cntv_ctl_el0, 0);
		write_msr(cntv_cval_el0, vcpu->regs.peripherals.cntv_cval_el0);
		write_msr(cntv_ctl_el0, vcpu->regs.peripherals.cntv_ctl_el0);
	}

	/*
	 * If we are switching (back) to the primary, disable the EL2 physical
	 * timer which was being used to emulate the EL0 virtual timer, as the
	 * virtual timer is now running for the primary again.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		write_msr(cnthp_ctl_el2, 0);
		write_msr(cnthp_cval_el2, 0);
	}
}

/**
 * Invalidate all stage 1 TLB entries on the current (physical) CPU for the
 * current VMID.
 */
static void invalidate_vm_tlb(void)
{
	/*
	 * Ensure that the last VTTBR write has taken effect so we invalidate
	 * the right set of TLB entries.
	 */
	isb();

	__asm__ volatile("tlbi vmalle1");

	/*
	 * Ensure that no instructions are fetched for the VM until after the
	 * TLB invalidation has taken effect.
	 */
	isb();

	/*
	 * Ensure that no data reads or writes for the VM happen until after
	 * the TLB invalidation has taken effect. Non-shareable is enough
	 * because the TLB is local to the CPU.
	 */
	dsb(nsh);
}

/**
 * Invalidates the TLB if a different vCPU is being run than the last vCPU of
 * the same VM which was run on the current pCPU.
 *
 * This is necessary because VMs may (contrary to the architecture
 * specification) use inconsistent ASIDs across vCPUs. c.f. KVM's similar
 * workaround:
 * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94d0e5980d6791b9
 */
void maybe_invalidate_tlb(struct vcpu *vcpu)
{
	size_t current_cpu_index = cpu_index(vcpu->cpu);
	ffa_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);

	if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
	    new_vcpu_index) {
		/*
		 * The vCPU has changed since the last time this VM was run on
		 * this pCPU, so we need to invalidate the TLB.
		 */
		invalidate_vm_tlb();

		/* Record the fact that this vCPU is now running on this CPU. */
		vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] =
			new_vcpu_index;
	}
}

noreturn void irq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("IRQ from current exception level.");
}

noreturn void fiq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("FIQ from current exception level.");
}

noreturn void serr_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("SError from current exception level.");
}

noreturn void sync_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	uintreg_t esr = read_msr(esr_el2);
	uintreg_t ec = GET_ESR_EC(esr);

	(void)spsr;

	switch (ec) {
	case EC_DATA_ABORT_SAME_EL:
		if (!(esr & (1U << 10))) { /* Check FnV bit. */
			dlog_error(
				"Data abort: pc=%#x, esr=%#x, ec=%#x, "
				"far=%#x\n",
				elr, esr, ec, read_msr(far_el2));
		} else {
			dlog_error(
				"Data abort: pc=%#x, esr=%#x, ec=%#x, "
				"far=invalid\n",
				elr, esr, ec);
		}

		break;

	default:
		dlog_error(
			"Unknown current sync exception pc=%#x, esr=%#x, "
			"ec=%#x\n",
			elr, esr, ec);
		break;
	}

	panic("EL2 exception");
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_irq(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->hyp_state.hcr_el2 |= HCR_EL2_VI;
	} else {
		r->hyp_state.hcr_el2 &= ~HCR_EL2_VI;
	}
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register.
 */
static void set_virtual_irq_current(bool enable)
{
	struct vcpu *vcpu = current();
	uintreg_t hcr_el2 = vcpu->regs.hyp_state.hcr_el2;

	if (enable) {
		hcr_el2 |= HCR_EL2_VI;
	} else {
		hcr_el2 &= ~HCR_EL2_VI;
	}
	vcpu->regs.hyp_state.hcr_el2 = hcr_el2;
}

/**
 * Sets or clears the VF bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_fiq(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->hyp_state.hcr_el2 |= HCR_EL2_VF;
	} else {
		r->hyp_state.hcr_el2 &= ~HCR_EL2_VF;
	}
}

/**
 * Sets or clears the VF bit in the HCR_EL2 register.
 */
static void set_virtual_fiq_current(bool enable)
{
	struct vcpu *vcpu = current();
	uintreg_t hcr_el2 = vcpu->regs.hyp_state.hcr_el2;

	if (enable) {
		hcr_el2 |= HCR_EL2_VF;
	} else {
		hcr_el2 &= ~HCR_EL2_VF;
	}
	vcpu->regs.hyp_state.hcr_el2 = hcr_el2;
}

#if SECURE_WORLD == 1

/**
 * Handles special direct messages from the SPMD to the SPMC. For now these
 * relate to power management and FF-A version negotiation.
 */
static bool spmd_handler(struct ffa_value *args, struct vcpu *current)
{
	ffa_vm_id_t sender = ffa_sender(*args);
	ffa_vm_id_t receiver = ffa_receiver(*args);
	ffa_vm_id_t current_vm_id = current->vm->id;
	uint32_t fwk_msg = ffa_fwk_msg(*args);
	uint8_t fwk_msg_func_id = fwk_msg & SPMD_FWK_MSG_FUNC_MASK;

	/*
	 * Check that the direct message request originates from the SPMD, is
	 * directed to the SPMC, and is a framework message.
	 */
	if (!(sender == HF_SPMD_VM_ID && receiver == HF_SPMC_VM_ID &&
	      current_vm_id == HF_OTHER_WORLD_ID) ||
	    (fwk_msg & SPMD_FWK_MSG_BIT) == 0) {
		return false;
	}

	/*
	 * The framework message is conveyed by EL3/SPMD to the SPMC, so the
	 * current VM ID must match the other world VM ID.
	 */
	CHECK(current->vm->id == HF_HYPERVISOR_VM_ID);

	switch (fwk_msg_func_id) {
	case SPMD_FWK_MSG_PSCI_REQ: {
		uint32_t psci_msg_response = PSCI_ERROR_NOT_SUPPORTED;
		struct vcpu *boot_vcpu = vcpu_get_boot_vcpu();
		struct vm *vm = boot_vcpu->vm;
		struct vcpu *vcpu;
		struct vcpu_locked vcpu_locked;

		/*
		 * TODO: the power management event reached the SPMC.
		 * In a later iteration, the power management event can
		 * be passed to the SP by resuming it.
		 */
		switch (args->arg3) {
		case PSCI_CPU_OFF: {
			/*
			 * The other world vCPU handling this message is pinned
			 * to the physical CPU that is being turned off.
			 */
			dlog_verbose("cpu%u off notification!\n",
				     vcpu_index(current));

			if (vm_power_management_cpu_off_requested(vm)) {
				/* Allow only S-EL1 MP SPs to reach here. */
				CHECK(!vm->el0_partition);
				CHECK(vm->vcpu_count > 1);

				vcpu = vm_get_vcpu(vm, vcpu_index(current));
				vcpu_locked = vcpu_lock(vcpu);
				vcpu->state = VCPU_STATE_OFF;
				vcpu_unlock(&vcpu_locked);
				cpu_off(vcpu->cpu);
			}

			psci_msg_response = PSCI_RETURN_SUCCESS;
			break;
		}
		default:
			dlog_error(
				"FF-A PSCI framework message not handled "
				"%#x %#x %#x %#x\n",
				args->func, args->arg1, args->arg2,
				args->arg3);
			psci_msg_response = PSCI_ERROR_NOT_SUPPORTED;
		}

		*args = (struct ffa_value){
			.func = FFA_MSG_SEND_DIRECT_RESP_32,
			.arg1 = ((uint64_t)HF_SPMC_VM_ID << 16) | HF_SPMD_VM_ID,
			.arg2 = SPMD_FWK_MSG_BIT | SPMD_FWK_MSG_PSCI_RESP,
			.arg3 = psci_msg_response};

		return true;
	}
	case SPMD_FWK_MSG_FFA_VERSION_REQ: {
		struct ffa_value ret = api_ffa_version(current, args->arg3);
		*args = (struct ffa_value){
			.func = FFA_MSG_SEND_DIRECT_RESP_32,
			.arg1 = ((uint64_t)HF_SPMC_VM_ID << 16) | HF_SPMD_VM_ID,
			/* Set bit 31 since this is a framework message. */
			.arg2 = SPMD_FWK_MSG_BIT |
				SPMD_FWK_MSG_FFA_VERSION_RESP,
			.arg3 = ret.func};
		return true;
	}
	default:
		dlog_error("FF-A framework message not handled %#x\n",
			   args->arg2);

		/*
		 * TODO: the framework message that was conveyed by a direct
		 * request is not handled although we still want to complete
		 * by a direct response. However, there is no defined error
		 * response to state that the message couldn't be handled.
		 * An alternative would be to return FFA_ERROR.
		 */
		*args = (struct ffa_value){
			.func = FFA_MSG_SEND_DIRECT_RESP_32,
			.arg1 = ((uint64_t)HF_SPMC_VM_ID << 16) | HF_SPMD_VM_ID,
			/* Set bit 31 since this is a framework message. */
			.arg2 = SPMD_FWK_MSG_BIT | fwk_msg_func_id};

		return true;
	}

	/* Should not reach this point. */
	assert(false);

	return false;
}

#endif

/**
 * Checks whether to block an SMC being forwarded from a VM.
 */
static bool smc_is_blocked(const struct vm *vm, uint32_t func)
{
	bool block_by_default = !vm->smc_whitelist.permissive;

	for (size_t i = 0; i < vm->smc_whitelist.smc_count; ++i) {
		if (func == vm->smc_whitelist.smcs[i]) {
			return false;
		}
	}

	dlog_notice("SMC %#010x attempted from VM %#x, blocked=%u\n", func,
		    vm->id, block_by_default);

	/* Access is still allowed in permissive mode. */
	return block_by_default;
}

/**
 * Applies SMC access control according to manifest and forwards the call if
 * access is granted.
 */
static void smc_forwarder(const struct vm *vm, struct ffa_value *args)
{
	struct ffa_value ret;
	uint32_t client_id = vm->id;
	uintreg_t arg7 = args->arg7;

	if (smc_is_blocked(vm, args->func)) {
		args->func = SMCCC_ERROR_UNKNOWN;
		return;
	}

	/*
	 * Set the Client ID but keep the existing Secure OS ID and anything
	 * else (currently unspecified) that the client may have passed in the
	 * upper bits.
	 */
	args->arg7 = client_id | (arg7 & ~CLIENT_ID_MASK);
	ret = smc_forward(args->func, args->arg1, args->arg2, args->arg3,
			  args->arg4, args->arg5, args->arg6, args->arg7);

	/*
	 * Preserve the value passed by the caller, rather than the generated
	 * client_id. Note that this would also overwrite any return value that
	 * may be in x7, but the SMCs that we are forwarding are legacy calls
	 * from before SMCCC 1.2 so won't have more than 4 return values anyway.
	 */
	ret.arg7 = arg7;

	plat_smc_post_forward(*args, &ret);

	*args = ret;
}

/**
 * In the normal world, ffa_handler is always called from the virtual FF-A
 * instance (from a VM in EL1). In the secure world, ffa_handler may be called
 * from the virtual (a secure partition in S-EL1) or physical FF-A instance
 * (from the normal world via EL3). The function returns true when the call is
 * handled. The *next pointer is updated to the next vCPU to run, which might
 * be the 'other world' vCPU if the call originated from the virtual FF-A
 * instance and has to be forwarded down to EL3, or left as is to resume the
 * current vCPU.
 */
static bool ffa_handler(struct ffa_value *args, struct vcpu *current,
			struct vcpu **next)
{
	uint32_t func = args->func;

	/*
	 * NOTE: When adding new methods to this handler update
	 * api_ffa_features accordingly.
	 */
	switch (func) {
	case FFA_VERSION_32:
		*args = api_ffa_version(current, args->arg1);
		return true;
	case FFA_PARTITION_INFO_GET_32: {
		struct ffa_uuid uuid;

		ffa_uuid_init(args->arg1, args->arg2, args->arg3, args->arg4,
			      &uuid);
		*args = api_ffa_partition_info_get(current, &uuid, args->arg5);
		return true;
	}
	case FFA_PARTITION_INFO_GET_REGS_64: {
		struct ffa_uuid uuid;
		uint32_t w0;
		uint32_t w1;
		uint32_t w2;
		uint32_t w3;
		uint16_t start_index;
		uint16_t tag;

		w0 = (uint32_t)(args->arg1 & 0xFFFFFFFF);
		w1 = (uint32_t)(args->arg1 >> 32);
		w2 = (uint32_t)(args->arg2 & 0xFFFFFFFF);
		w3 = (uint32_t)(args->arg2 >> 32);
		ffa_uuid_init(w0, w1, w2, w3, &uuid);

		start_index = args->arg3 & 0xFFFF;
		tag = (args->arg3 >> 16) & 0xFFFF;
		*args = api_ffa_partition_info_get_regs(current, &uuid,
							start_index, tag);
		return true;
	}
	case FFA_ID_GET_32:
		*args = api_ffa_id_get(current);
		return true;
	case FFA_SPM_ID_GET_32:
		*args = api_ffa_spm_id_get();
		return true;
	case FFA_FEATURES_32:
		*args = api_ffa_features(args->arg1, args->arg2,
					 current->vm->ffa_version);
		return true;
	case FFA_RX_RELEASE_32:
		*args = api_ffa_rx_release(ffa_receiver(*args), current, next);
		return true;
	case FFA_RXTX_MAP_64:
		*args = api_ffa_rxtx_map(ipa_init(args->arg1),
					 ipa_init(args->arg2), args->arg3,
					 current);
		return true;
	case FFA_RXTX_UNMAP_32:
		*args = api_ffa_rxtx_unmap(ffa_vm_id(*args), current);
		return true;
	case FFA_RX_ACQUIRE_32:
		*args = api_ffa_rx_acquire(ffa_receiver(*args), current);
		return true;
	case FFA_YIELD_32:
		*args = api_yield(current, next);
		return true;
	case FFA_MSG_SEND_32:
		*args = plat_ffa_msg_send(
			ffa_sender(*args), ffa_receiver(*args),
			ffa_msg_send_size(*args), current, next);
		return true;
	case FFA_MSG_SEND2_32:
		*args = api_ffa_msg_send2(ffa_sender(*args),
					  ffa_msg_send2_flags(*args), current);
		return true;
	case FFA_MSG_WAIT_32:
		*args = api_ffa_msg_wait(current, next, args);
		return true;
	case FFA_MSG_POLL_32:
		*args = api_ffa_msg_recv(false, current, next);
		return true;
	case FFA_RUN_32:
		*args = api_ffa_run(ffa_vm_id(*args), ffa_vcpu_index(*args),
				    current, next);
		return true;
	case FFA_MEM_DONATE_32:
	case FFA_MEM_LEND_32:
	case FFA_MEM_SHARE_32:
		*args = api_ffa_mem_send(func, args->arg1, args->arg2,
					 ipa_init(args->arg3), args->arg4,
					 current);
		return true;
	case FFA_MEM_RETRIEVE_REQ_32:
		*args = api_ffa_mem_retrieve_req(args->arg1, args->arg2,
						 ipa_init(args->arg3),
						 args->arg4, current);
		return true;
	case FFA_MEM_RELINQUISH_32:
		*args = api_ffa_mem_relinquish(current);
		return true;
	case FFA_MEM_RECLAIM_32:
		*args = api_ffa_mem_reclaim(
			ffa_assemble_handle(args->arg1, args->arg2), args->arg3,
			current);
		return true;
	case FFA_MEM_FRAG_RX_32:
		*args = api_ffa_mem_frag_rx(ffa_frag_handle(*args), args->arg3,
					    (args->arg4 >> 16) & 0xffff,
					    current);
		return true;
	case FFA_MEM_FRAG_TX_32:
		*args = api_ffa_mem_frag_tx(ffa_frag_handle(*args), args->arg3,
					    (args->arg4 >> 16) & 0xffff,
					    current);
		return true;
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32: {
#if SECURE_WORLD == 1
		if (spmd_handler(args, current)) {
			return true;
		}
#endif
		*args = api_ffa_msg_send_direct_req(ffa_sender(*args),
						    ffa_receiver(*args), *args,
						    current, next);
		return true;
	}
	case FFA_MSG_SEND_DIRECT_RESP_64:
	case FFA_MSG_SEND_DIRECT_RESP_32:
		*args = api_ffa_msg_send_direct_resp(ffa_sender(*args),
						     ffa_receiver(*args), *args,
						     current, next);
		return true;
	case FFA_SECONDARY_EP_REGISTER_64:
		/*
		 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1.1
		 * The callee must return NOT_SUPPORTED if this function is
		 * invoked by a caller that implements version v1.0 of
		 * the Framework.
		 */
		*args = api_ffa_secondary_ep_register(ipa_init(args->arg1),
						      current);
		return true;
	case FFA_NOTIFICATION_BITMAP_CREATE_32:
		*args = api_ffa_notification_bitmap_create(
			(ffa_vm_id_t)args->arg1, (ffa_vcpu_count_t)args->arg2,
			current);
		return true;
	case FFA_NOTIFICATION_BITMAP_DESTROY_32:
		*args = api_ffa_notification_bitmap_destroy(
			(ffa_vm_id_t)args->arg1, current);
		return true;
	case FFA_NOTIFICATION_BIND_32:
		*args = api_ffa_notification_update_bindings(
			ffa_sender(*args), ffa_receiver(*args), args->arg2,
			ffa_notifications_bitmap(args->arg3, args->arg4), true,
			current);
		return true;
	case FFA_NOTIFICATION_UNBIND_32:
		*args = api_ffa_notification_update_bindings(
			ffa_sender(*args), ffa_receiver(*args), 0,
			ffa_notifications_bitmap(args->arg3, args->arg4), false,
			current);
		return true;
	case FFA_MEM_PERM_SET_32:
	case FFA_MEM_PERM_SET_64:
		*args = api_ffa_mem_perm_set(va_init(args->arg1), args->arg2,
					     args->arg3, current);
		return true;
	case FFA_MEM_PERM_GET_32:
	case FFA_MEM_PERM_GET_64:
		*args = api_ffa_mem_perm_get(va_init(args->arg1), current);
		return true;
	case FFA_NOTIFICATION_SET_32:
		*args = api_ffa_notification_set(
			ffa_sender(*args), ffa_receiver(*args), args->arg2,
			ffa_notifications_bitmap(args->arg3, args->arg4),
			current);
		return true;
	case FFA_NOTIFICATION_GET_32:
		*args = api_ffa_notification_get(
			ffa_receiver(*args), ffa_notifications_get_vcpu(*args),
			args->arg2, current);
		return true;
	case FFA_NOTIFICATION_INFO_GET_64:
		*args = api_ffa_notification_info_get(current);
		return true;
	case FFA_INTERRUPT_32:
		*args = plat_ffa_handle_secure_interrupt(current, next, true);
		return true;
	case FFA_CONSOLE_LOG_32:
	case FFA_CONSOLE_LOG_64:
		*args = api_ffa_console_log(*args, current);
		return true;
	}

	return false;
}

/**
 * Set or clear VI/VF bits according to pending interrupts.
 */
static void vcpu_update_virtual_interrupts(struct vcpu *next)
{
	struct vcpu_locked vcpu_locked;

	if (next == NULL) {
		if (current()->vm->el0_partition) {
			return;
		}

		/*
		 * Not switching vCPUs, set the bit for the current vCPU
		 * directly in the register.
		 */
		vcpu_locked = vcpu_lock(current());
		set_virtual_irq_current(
			vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
		set_virtual_fiq_current(
			vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
		vcpu_unlock(&vcpu_locked);
	} else if (vm_id_is_current_world(next->vm->id)) {
		if (next->vm->el0_partition) {
			return;
		}
		/*
		 * About to switch vCPUs, set the bit for the vCPU to which we
		 * are switching in the saved copy of the register.
		 */

		vcpu_locked = vcpu_lock(next);
		set_virtual_irq(&next->regs,
				vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
		set_virtual_fiq(&next->regs,
				vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
		vcpu_unlock(&vcpu_locked);
	}
}

/**
 * Handles PSCI and FF-A calls and writes the return value back to the
 * registers of the vCPU. This is shared between smc_handler and hvc_handler.
 *
 * Returns true if the call was handled.
 */
static bool hvc_smc_handler(struct ffa_value args, struct vcpu *vcpu,
			    struct vcpu **next)
{
	/* Do not expect PSCI calls emitted from within the secure world. */
#if SECURE_WORLD == 0
	if (psci_handler(vcpu, args.func, args.arg1, args.arg2, args.arg3,
			 &vcpu->regs.r[0], next)) {
		return true;
	}
#endif

	if (ffa_handler(&args, vcpu, next)) {
#if SECURE_WORLD == 1
		/*
		 * If giving back execution to the NWd, check if the Schedule
		 * Receiver Interrupt has been delayed, and trigger it on
		 * current core if so.
		 */
		if ((*next != NULL && (*next)->vm->id == HF_OTHER_WORLD_ID) ||
		    (*next == NULL && vcpu->vm->id == HF_OTHER_WORLD_ID)) {
			plat_ffa_sri_trigger_if_delayed(vcpu->cpu);
		}
#endif
		arch_regs_set_retval(&vcpu->regs, args);
		vcpu_update_virtual_interrupts(*next);
		return true;
	}

	return false;
}

/**
 * Processes SMC instruction calls.
 */
static struct vcpu *smc_handler(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	switch (args.func & ~SMCCC_CONVENTION_MASK) {
	case HF_DEBUG_LOG:
		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
		return NULL;
	}

	smc_forwarder(vcpu->vm, &args);
	arch_regs_set_retval(&vcpu->regs, args);
	return NULL;
}

#if SECURE_WORLD == 1

/**
 * Called from other_world_loop on return from SMC.
 * Processes SMC calls originating from the NWd.
 */
struct vcpu *smc_handler_from_nwd(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	/*
	 * If the SMC emitted by the normal world is not handled in the secure
	 * world then return an error stating that the ABI is not supported.
	 * Only FF-A calls are supported. We cannot return SMCCC_ERROR_UNKNOWN
	 * directly because the SPMD smc handler would not recognize it as a
	 * standard FF-A call returning from the SPMC.
	 */
	arch_regs_set_retval(&vcpu->regs, ffa_error(FFA_NOT_SUPPORTED));

	return NULL;
}

#endif

/*
 * Exception vector offsets.
 * See Arm Architecture Reference Manual Armv8-A, D1.10.2.
 */

/**
 * Offset for synchronous exceptions at current EL with SPx.
 */
#define OFFSET_CURRENT_SPX UINT64_C(0x200)

/**
 * Offset for synchronous exceptions at lower EL using AArch64.
 */
#define OFFSET_LOWER_EL_64 UINT64_C(0x400)

/**
 * Offset for synchronous exceptions at lower EL using AArch32.
 */
#define OFFSET_LOWER_EL_32 UINT64_C(0x600)
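
/*
 * As a reference, these offsets index the standard AArch64 vector table: an
 * exception taken from EL1 using SP_EL1 vectors to VBAR_EL1 + 0x200, one
 * taken from AArch64 EL0 to VBAR_EL1 + 0x400, and one taken from AArch32 EL0
 * to VBAR_EL1 + 0x600, as used by get_el1_exception_handler_addr() below.
 */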

/**
 * Returns the address for the exception handler at EL1.
 */
static uintreg_t get_el1_exception_handler_addr(const struct vcpu *vcpu)
{
	uintreg_t base_addr = has_vhe_support() ? read_msr(MSR_VBAR_EL12)
						: read_msr(vbar_el1);
	uintreg_t pe_mode = vcpu->regs.spsr & PSR_PE_MODE_MASK;
	bool is_arch32 = vcpu->regs.spsr & PSR_ARCH_MODE_32;

	if (pe_mode == PSR_PE_MODE_EL0T) {
		if (is_arch32) {
			base_addr += OFFSET_LOWER_EL_32;
		} else {
			base_addr += OFFSET_LOWER_EL_64;
		}
	} else {
		CHECK(!is_arch32);
		base_addr += OFFSET_CURRENT_SPX;
	}

	return base_addr;
}

/**
 * Injects an exception with the specified Exception Syndrome Register value
 * into EL1.
 *
 * NOTE: This function assumes that the lazy registers haven't been saved, and
 * writes to the lazy registers of the CPU directly instead of the vCPU.
 */
static void inject_el1_exception(struct vcpu *vcpu, uintreg_t esr_el1_value,
				 uintreg_t far_el1_value)
{
	uintreg_t handler_address = get_el1_exception_handler_addr(vcpu);

	/* Update the CPU state to inject the exception. */
	if (has_vhe_support()) {
		write_msr(MSR_ESR_EL12, esr_el1_value);
		write_msr(MSR_FAR_EL12, far_el1_value);
		write_msr(MSR_ELR_EL12, vcpu->regs.pc);
		write_msr(MSR_SPSR_EL12, vcpu->regs.spsr);
	} else {
		write_msr(esr_el1, esr_el1_value);
		write_msr(far_el1, far_el1_value);
		write_msr(elr_el1, vcpu->regs.pc);
		write_msr(spsr_el1, vcpu->regs.spsr);
	}

	/*
	 * Mask (disable) interrupts and run in EL1h mode.
	 * EL1h mode is used because by default, taking an exception selects the
	 * stack pointer for the target Exception level. The software can change
	 * that later in the handler if needed.
	 */
	vcpu->regs.spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;

	/* Transfer control to the exception handler. */
| 920 | vcpu->regs.pc = handler_address; |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 921 | } |
| 922 | |
| 923 | /** |
| 924 | * Injects a Data Abort exception (same exception level). |
| 925 | */ |
| 926 | static void inject_el1_data_abort_exception(struct vcpu *vcpu, |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 927 | uintreg_t esr_el2, |
| 928 | uintreg_t far_el2) |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 929 | { |
| 930 | /* |
| 931 | * ISS encoding remains the same, but the EC is changed to reflect |
| 932 | * where the exception came from. |
| 933 | * See Arm Architecture Reference Manual Armv8-A, pages D13-2943/2982. |
| 934 | */ |
| 935 | uintreg_t esr_el1_value = GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) | |
| 936 | (EC_DATA_ABORT_SAME_EL << ESR_EC_OFFSET); |
| 937 | |
Olivier Deprez | f92e5d4 | 2020-11-13 16:00:54 +0100 | [diff] [blame] | 938 | dlog_notice("Injecting Data Abort exception into VM %#x.\n", |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 939 | vcpu->vm->id); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 940 | |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 941 | inject_el1_exception(vcpu, esr_el1_value, far_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 942 | } |
| 943 | |
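/*
 * Worked example of the EC rewrite used by these injectors (architectural
 * values, not macros from this file): a data abort from a lower EL is
 * reported with ESR_EL2.EC = 0x24. Keeping ISS (bits [24:0]) and IL
 * (bit 25) but substituting EC = 0x25 (data abort, same EL) makes the
 * fault appear to EL1 as one taken from EL1 itself:
 *
 *   esr_el2 = (0x24 << 26) | (1 << 25) | iss;
 *   esr_el1 = (0x25 << 26) | (1 << 25) | iss;
 */
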
| 944 | /** |
 * Injects an Instruction Abort exception (same exception level).
| 946 | */ |
| 947 | static void inject_el1_instruction_abort_exception(struct vcpu *vcpu, |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 948 | uintreg_t esr_el2, |
| 949 | uintreg_t far_el2) |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 950 | { |
| 951 | /* |
| 952 | * ISS encoding remains the same, but the EC is changed to reflect |
| 953 | * where the exception came from. |
| 954 | * See Arm Architecture Reference Manual Armv8-A, pages D13-2941/2980. |
| 955 | */ |
| 956 | uintreg_t esr_el1_value = |
| 957 | GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) | |
| 958 | (EC_INSTRUCTION_ABORT_SAME_EL << ESR_EC_OFFSET); |
| 959 | |
Olivier Deprez | f92e5d4 | 2020-11-13 16:00:54 +0100 | [diff] [blame] | 960 | dlog_notice("Injecting Instruction Abort exception into VM %#x.\n", |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 961 | vcpu->vm->id); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 962 | |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 963 | inject_el1_exception(vcpu, esr_el1_value, far_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 964 | } |
| 965 | |
| 966 | /** |
 * Injects an exception with an unknown reason into EL1.
| 968 | */ |
| 969 | static void inject_el1_unknown_exception(struct vcpu *vcpu, uintreg_t esr_el2) |
| 970 | { |
| 971 | uintreg_t esr_el1_value = |
| 972 | GET_ESR_IL(esr_el2) | (EC_UNKNOWN << ESR_EC_OFFSET); |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 973 | |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 974 | dlog_notice("Injecting Unknown Reason exception into VM %#x.\n", |
| 975 | vcpu->vm->id); |
| 976 | |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 977 | /* |
	 * The value of the far_el2 register is UNKNOWN in this case, so do
	 * not propagate it, to avoid leaking sensitive information.
| 980 | */ |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 981 | inject_el1_exception(vcpu, esr_el1_value, 0); |
| 982 | } |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 983 | |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 984 | /** |
| 985 | * Injects an exception because of a system register trap. |
| 986 | */ |
| 987 | static void inject_el1_sysreg_trap_exception(struct vcpu *vcpu, |
| 988 | uintreg_t esr_el2) |
| 989 | { |
| 990 | char *direction_str = ISS_IS_READ(esr_el2) ? "read" : "write"; |
| 991 | |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 992 | dlog_notice( |
| 993 | "Trapped access to system register %s: op0=%d, op1=%d, crn=%d, " |
| 994 | "crm=%d, op2=%d, rt=%d.\n", |
| 995 | direction_str, GET_ISS_OP0(esr_el2), GET_ISS_OP1(esr_el2), |
| 996 | GET_ISS_CRN(esr_el2), GET_ISS_CRM(esr_el2), |
| 997 | GET_ISS_OP2(esr_el2), GET_ISS_RT(esr_el2)); |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 998 | |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 999 | inject_el1_unknown_exception(vcpu, esr_el2); |
Fuad Tabba | a48d122 | 2019-12-09 15:42:32 +0000 | [diff] [blame] | 1000 | } |
| 1001 | |
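/*
 * Illustrative ISS decode for the log in inject_el1_sysreg_trap_exception()
 * (example encoding only; whether a given register traps at all depends on
 * the HCR_EL2/MDCR_EL2 configuration): a trapped `mrs x2, cntfrq_el0`
 * would print op0=3, op1=3, crn=14, crm=0, op2=0, rt=2 for a read.
 */
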
Andrew Walbran | d8d3f5d | 2020-10-07 18:23:01 +0100 | [diff] [blame] | 1002 | static struct vcpu *hvc_handler(struct vcpu *vcpu) |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1003 | { |
Andrew Walbran | d8d3f5d | 2020-10-07 18:23:01 +0100 | [diff] [blame] | 1004 | struct ffa_value args = arch_regs_get_args(&vcpu->regs); |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1005 | struct vcpu *next = NULL; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1006 | |
Andrew Walbran | d8d3f5d | 2020-10-07 18:23:01 +0100 | [diff] [blame] | 1007 | if (hvc_smc_handler(args, vcpu, &next)) { |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1008 | return next; |
Andrew Walbran | 7d28d9a | 2019-08-30 16:24:58 +0100 | [diff] [blame] | 1009 | } |
Jose Marinho | fc0b2b6 | 2019-06-06 11:18:45 +0100 | [diff] [blame] | 1010 | |
Andrew Walbran | 7f920af | 2019-09-03 17:09:30 +0100 | [diff] [blame] | 1011 | switch (args.func) { |
Wedson Almeida Filho | ea62e2e | 2019-01-09 19:14:59 +0000 | [diff] [blame] | 1012 | case HF_MAILBOX_WRITABLE_GET: |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1013 | vcpu->regs.r[0] = api_mailbox_writable_get(vcpu); |
Wedson Almeida Filho | ea62e2e | 2019-01-09 19:14:59 +0000 | [diff] [blame] | 1014 | break; |
| 1015 | |
| 1016 | case HF_MAILBOX_WAITER_GET: |
Andrew Walbran | 7f920af | 2019-09-03 17:09:30 +0100 | [diff] [blame] | 1017 | vcpu->regs.r[0] = api_mailbox_waiter_get(args.arg1, vcpu); |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 1018 | break; |
| 1019 | |
Wedson Almeida Filho | c559d13 | 2019-01-09 19:33:40 +0000 | [diff] [blame] | 1020 | case HF_INTERRUPT_ENABLE: |
Manish Pandey | 35e452f | 2021-02-18 21:36:34 +0000 | [diff] [blame] | 1021 | vcpu->regs.r[0] = api_interrupt_enable(args.arg1, args.arg2, |
| 1022 | args.arg3, vcpu); |
Andrew Walbran | 318f573 | 2018-11-20 16:23:42 +0000 | [diff] [blame] | 1023 | break; |
| 1024 | |
Wedson Almeida Filho | c559d13 | 2019-01-09 19:33:40 +0000 | [diff] [blame] | 1025 | case HF_INTERRUPT_GET: |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1026 | vcpu->regs.r[0] = api_interrupt_get(vcpu); |
Andrew Walbran | 318f573 | 2018-11-20 16:23:42 +0000 | [diff] [blame] | 1027 | break; |
| 1028 | |
Wedson Almeida Filho | c559d13 | 2019-01-09 19:33:40 +0000 | [diff] [blame] | 1029 | case HF_INTERRUPT_INJECT: |
Andrew Walbran | 7f920af | 2019-09-03 17:09:30 +0100 | [diff] [blame] | 1030 | vcpu->regs.r[0] = api_interrupt_inject(args.arg1, args.arg2, |
| 1031 | args.arg3, vcpu, &next); |
Andrew Walbran | 318f573 | 2018-11-20 16:23:42 +0000 | [diff] [blame] | 1032 | break; |
| 1033 | |
Andrew Walbran | c1ad4ce | 2019-05-09 11:41:39 +0100 | [diff] [blame] | 1034 | case HF_DEBUG_LOG: |
Andrew Walbran | 7f920af | 2019-09-03 17:09:30 +0100 | [diff] [blame] | 1035 | vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu); |
Andrew Walbran | c1ad4ce | 2019-05-09 11:41:39 +0100 | [diff] [blame] | 1036 | break; |
| 1037 | |
Madhukar Pappireddy | f675bb6 | 2021-08-03 12:57:10 -0500 | [diff] [blame] | 1038 | #if SECURE_WORLD == 1 |
| 1039 | case HF_INTERRUPT_DEACTIVATE: |
| 1040 | vcpu->regs.r[0] = plat_ffa_interrupt_deactivate( |
| 1041 | args.arg1, args.arg2, vcpu); |
| 1042 | break; |
| 1043 | #endif |
| 1044 | |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1045 | default: |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1046 | vcpu->regs.r[0] = SMCCC_ERROR_UNKNOWN; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1047 | } |
| 1048 | |
Manish Pandey | 35e452f | 2021-02-18 21:36:34 +0000 | [diff] [blame] | 1049 | vcpu_update_virtual_interrupts(next); |
Andrew Walbran | 3d84a26 | 2018-12-13 14:41:19 +0000 | [diff] [blame] | 1050 | |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1051 | return next; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1052 | } |
| 1053 | |
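/*
 * Sketch of how a guest reaches hvc_handler() (assuming the hf_call()
 * wrapper from vmapi/hf/call.h, which puts the function ID in x0 and the
 * arguments in x1-x3 before executing `hvc #0`):
 *
 *   int64_t ret = hf_call(HF_INTERRUPT_ENABLE, intid, true, type);
 *
 * The trap is taken as EC_HVC (EC_SVC for EL0 partitions), the arguments
 * are recovered with arch_regs_get_args() and the result is written back
 * to the vCPU's r0.
 */
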
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 1054 | struct vcpu *irq_lower(void) |
| 1055 | { |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 1056 | #if SECURE_WORLD == 1 |
| 1057 | struct vcpu *next = NULL; |
| 1058 | |
Madhukar Pappireddy | dc0c801 | 2022-06-21 15:23:14 -0500 | [diff] [blame] | 1059 | plat_ffa_handle_secure_interrupt(current(), &next, false); |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 1060 | |
| 1061 | /* |
| 1062 | * Since we are in interrupt context, set the bit for the |
| 1063 | * next vCPU directly in the register. |
| 1064 | */ |
| 1065 | vcpu_update_virtual_interrupts(next); |
| 1066 | |
| 1067 | return next; |
| 1068 | #else |
Andrew Scull | 9726c25 | 2019-01-23 13:44:19 +0000 | [diff] [blame] | 1069 | /* |
| 1070 | * Switch back to primary VM, interrupts will be handled there. |
| 1071 | * |
| 1072 | * If the VM has aborted, this vCPU will be aborted when the scheduler |
| 1073 | * tries to run it again. This means the interrupt will not be delayed |
| 1074 | * by the aborted VM. |
| 1075 | * |
| 1076 | * TODO: Only switch when the interrupt isn't for the current VM. |
| 1077 | */ |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame] | 1078 | return api_preempt(current()); |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 1079 | #endif |
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 1080 | } |
| 1081 | |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 1082 | struct vcpu *fiq_lower(void) |
| 1083 | { |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1084 | #if SECURE_WORLD == 1 |
| 1085 | struct vcpu_locked current_locked; |
| 1086 | struct vcpu *current_vcpu = current(); |
Daniel Boulby | 4dd3f53 | 2021-09-21 09:57:08 +0100 | [diff] [blame] | 1087 | int64_t ret; |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1088 | |
Madhukar Pappireddy | c40f55f | 2022-06-22 11:00:41 -0500 | [diff] [blame] | 1089 | assert(current_vcpu->vm->ns_interrupts_action != NS_ACTION_QUEUED); |
| 1090 | |
Maksims Svecovs | 9ddf86a | 2021-05-06 17:17:21 +0100 | [diff] [blame] | 1091 | if (plat_ffa_vm_managed_exit_supported(current_vcpu->vm)) { |
Madhukar Pappireddy | dd6fdfb | 2021-12-14 12:30:36 -0600 | [diff] [blame] | 1092 | uint8_t pmr = plat_interrupts_get_priority_mask(); |
| 1093 | |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1094 | /* Mask all interrupts */ |
| 1095 | plat_interrupts_set_priority_mask(0x0); |
| 1096 | |
| 1097 | current_locked = vcpu_lock(current_vcpu); |
Madhukar Pappireddy | dd6fdfb | 2021-12-14 12:30:36 -0600 | [diff] [blame] | 1098 | current_vcpu->priority_mask = pmr; |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1099 | ret = api_interrupt_inject_locked(current_locked, |
| 1100 | HF_MANAGED_EXIT_INTID, |
| 1101 | current_vcpu, NULL); |
| 1102 | if (ret != 0) { |
| 1103 | panic("Failed to inject managed exit interrupt\n"); |
| 1104 | } |
| 1105 | |
| 1106 | /* Entering managed exit sequence. */ |
| 1107 | current_vcpu->processing_managed_exit = true; |
| 1108 | |
| 1109 | vcpu_unlock(¤t_locked); |
| 1110 | |
| 1111 | /* |
| 1112 | * Since we are in interrupt context, set the bit for the |
| 1113 | * current vCPU directly in the register. |
| 1114 | */ |
| 1115 | vcpu_update_virtual_interrupts(NULL); |
| 1116 | |
| 1117 | /* Resume current vCPU. */ |
| 1118 | return NULL; |
| 1119 | } |
Manish Pandey | a5f39fb | 2020-09-11 09:47:11 +0100 | [diff] [blame] | 1120 | |
Madhukar Pappireddy | d46c06e | 2022-06-21 18:14:52 -0500 | [diff] [blame] | 1121 | /* |
| 1122 | * Unwind Normal World Scheduled Call chain in response to NS |
| 1123 | * Interrupt. |
| 1124 | */ |
| 1125 | return plat_ffa_unwind_nwd_call_chain_interrupt(current_vcpu); |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 1126 | #else |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 1127 | return irq_lower(); |
Madhukar Pappireddy | cbecc96 | 2021-08-03 13:11:57 -0500 | [diff] [blame] | 1128 | #endif |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 1129 | } |
| 1130 | |
Fuad Tabba | d1d6798 | 2020-01-08 11:28:29 +0000 | [diff] [blame] | 1131 | noreturn struct vcpu *serr_lower(void) |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 1132 | { |
Fuad Tabba | d1d6798 | 2020-01-08 11:28:29 +0000 | [diff] [blame] | 1133 | /* |
| 1134 | * SError exceptions should be isolated and handled by the responsible |
	 * VM/exception level. Getting here indicates a bug: either isolation
	 * is not working, or the processor does not support ARMv8.2-IESB, in
	 * which case Hafnium routes SError exceptions to EL2 (here).
| 1138 | */ |
| 1139 | panic("SError from a lower exception level."); |
Wedson Almeida Filho | 9d5040f | 2018-10-29 08:41:27 +0000 | [diff] [blame] | 1140 | } |
| 1141 | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1142 | /** |
| 1143 | * Initialises a fault info structure. It assumes that an FnV bit exists at |
| 1144 | * bit offset 10 of the ESR, and that it is only valid when the bottom 6 bits of |
| 1145 | * the ESR (the fault status code) are 010000; this is the case for both |
| 1146 | * instruction and data aborts, but not necessarily for other exception reasons. |
| 1147 | */ |
| 1148 | static struct vcpu_fault_info fault_info_init(uintreg_t esr, |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 1149 | const struct vcpu *vcpu, |
| 1150 | uint32_t mode) |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1151 | { |
| 1152 | uint32_t fsc = esr & 0x3f; |
| 1153 | struct vcpu_fault_info r; |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 1154 | uint64_t hpfar_el2_val; |
| 1155 | uint64_t hpfar_el2_fipa; |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1156 | |
| 1157 | r.mode = mode; |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1158 | r.pc = va_init(vcpu->regs.pc); |
| 1159 | |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 1160 | /* Get Hypervisor IPA Fault Address value. */ |
| 1161 | hpfar_el2_val = read_msr(hpfar_el2); |
| 1162 | |
	/*
	 * Extract the faulting IPA: HPFAR_EL2.FIPA holds the faulting IPA
	 * shifted right by 12, placed at bit 4 of the register, so shifting
	 * the masked value left by 8 re-aligns it to bit 12.
	 */
| 1164 | hpfar_el2_fipa = (hpfar_el2_val & HPFAR_EL2_FIPA) << 8; |
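	/*
	 * For example, a fault at IPA 0x80001234 is reported with
	 * FIPA = 0x80001 (i.e. hpfar_el2 = 0x800010), which the shift
	 * above re-aligns to hpfar_el2_fipa = 0x80001000; the page
	 * offset (0x234) is recovered from far_el2 further down.
	 */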
| 1165 | |
| 1166 | #if SECURE_WORLD == 1 |
| 1167 | |
	/*
| 1169 | * Determine if faulting IPA targets NS space. |
| 1170 | * At NS-EL2 hpfar_el2 bit 63 is RES0. At S-EL2, this bit determines if |
| 1171 | * the faulting Stage-1 address output is a secure or non-secure IPA. |
| 1172 | */ |
| 1173 | if ((hpfar_el2_val & HPFAR_EL2_NS) != 0) { |
| 1174 | r.mode |= MM_MODE_NS; |
| 1175 | } |
| 1176 | |
| 1177 | #endif |
| 1178 | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1179 | /* |
| 1180 | * Check the FnV bit, which is only valid if dfsc/ifsc is 010000. It |
| 1181 | * indicates that we cannot rely on far_el2. |
| 1182 | */ |
Andrew Walbran | e52006c | 2019-10-22 18:01:28 +0100 | [diff] [blame] | 1183 | if (fsc == 0x10 && esr & (1U << 10)) { |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1184 | r.vaddr = va_init(0); |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 1185 | r.ipaddr = ipa_init(hpfar_el2_fipa); |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1186 | } else { |
| 1187 | r.vaddr = va_init(read_msr(far_el2)); |
Olivier Deprez | 98ad2d2 | 2020-05-20 09:52:43 +0200 | [diff] [blame] | 1188 | r.ipaddr = ipa_init(hpfar_el2_fipa | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1189 | (read_msr(far_el2) & (PAGE_SIZE - 1))); |
| 1190 | } |
| 1191 | |
| 1192 | return r; |
| 1193 | } |
| 1194 | |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 1195 | struct vcpu *sync_lower_exception(uintreg_t esr, uintreg_t far) |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1196 | { |
Wedson Almeida Filho | 00df6c7 | 2018-10-18 11:19:24 +0100 | [diff] [blame] | 1197 | struct vcpu *vcpu = current(); |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1198 | struct vcpu_fault_info info; |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1199 | struct vcpu *new_vcpu = NULL; |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1200 | uintreg_t ec = GET_ESR_EC(esr); |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1201 | bool is_el0_partition = vcpu->vm->el0_partition; |
Raghu Krishnamurthy | f16b2ce | 2021-11-02 07:48:38 -0700 | [diff] [blame] | 1202 | bool resume = false; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1203 | |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1204 | switch (ec) { |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1205 | case EC_WFI_WFE: |
Andrew Walbran | 48196eb | 2019-03-04 14:56:24 +0000 | [diff] [blame] | 1206 | /* Skip the instruction. */ |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1207 | vcpu->regs.pc += GET_NEXT_PC_INC(esr); |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1208 | |
| 1209 | /* |
| 1210 | * For EL0 partitions, treat both WFI and WFE the same way so |
| 1211 | * that FFA_RUN can be called on the partition to resume it. If |
		 * we treated WFI using api_wait_for_interrupt, the vCPU
		 * would be blocked waiting for an interrupt, but we cannot
		 * inject interrupts into EL0 partitions.
| 1215 | */ |
| 1216 | if (is_el0_partition) { |
| 1217 | api_yield(vcpu, &new_vcpu); |
| 1218 | return new_vcpu; |
| 1219 | } |
| 1220 | |
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 1221 | /* Check TI bit of ISS, 0 = WFI, 1 = WFE. */ |
Andrew Scull | 7364a8e | 2018-07-19 15:39:29 +0100 | [diff] [blame] | 1222 | if (esr & 1) { |
Andrew Walbran | 48196eb | 2019-03-04 14:56:24 +0000 | [diff] [blame] | 1223 | /* WFE */ |
| 1224 | /* |
| 1225 | * TODO: consider giving the scheduler more context, |
| 1226 | * somehow. |
| 1227 | */ |
Andrew Walbran | 16075b6 | 2019-09-03 17:11:07 +0100 | [diff] [blame] | 1228 | api_yield(vcpu, &new_vcpu); |
Jose Marinho | 135dff3 | 2019-02-28 10:25:57 +0000 | [diff] [blame] | 1229 | return new_vcpu; |
Andrew Scull | 7364a8e | 2018-07-19 15:39:29 +0100 | [diff] [blame] | 1230 | } |
Andrew Walbran | 48196eb | 2019-03-04 14:56:24 +0000 | [diff] [blame] | 1231 | /* WFI */ |
Andrew Scull | 9726c25 | 2019-01-23 13:44:19 +0000 | [diff] [blame] | 1232 | return api_wait_for_interrupt(vcpu); |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1233 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1234 | case EC_DATA_ABORT_LOWER_EL: |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1235 | info = fault_info_init( |
Andrew Walbran | e52006c | 2019-10-22 18:01:28 +0100 | [diff] [blame] | 1236 | esr, vcpu, (esr & (1U << 6)) ? MM_MODE_W : MM_MODE_R); |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1237 | |
Raghu Krishnamurthy | f16b2ce | 2021-11-02 07:48:38 -0700 | [diff] [blame] | 1238 | resume = vcpu_handle_page_fault(vcpu, &info); |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1239 | if (is_el0_partition) { |
| 1240 | dlog_warning("Data abort on EL0 partition\n"); |
Raghu Krishnamurthy | f16b2ce | 2021-11-02 07:48:38 -0700 | [diff] [blame] | 1241 | /* |
			 * Abort the EL0 context if we should not resume it,
			 * or if this is an alignment fault.
			 * vcpu_handle_page_fault() only checks the mode of
			 * the page in an architecture-agnostic way, but
			 * alignment faults on AArch64 can happen on a
			 * correctly mapped page.
| 1248 | */ |
| 1249 | if (!resume || ((esr & 0x3f) == 0x21)) { |
| 1250 | return api_abort(vcpu); |
| 1251 | } |
| 1252 | } |
| 1253 | |
| 1254 | if (resume) { |
| 1255 | return NULL; |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1256 | } |
| 1257 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1258 | /* Inform the EL1 of the data abort. */ |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 1259 | inject_el1_data_abort_exception(vcpu, esr, far); |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1260 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1261 | /* Schedule the same VM to continue running. */ |
| 1262 | return NULL; |
| 1263 | |
| 1264 | case EC_INSTRUCTION_ABORT_LOWER_EL: |
Andrew Scull | d3cfaad | 2019-04-04 11:34:10 +0100 | [diff] [blame] | 1265 | info = fault_info_init(esr, vcpu, MM_MODE_X); |
Raghu Krishnamurthy | f16b2ce | 2021-11-02 07:48:38 -0700 | [diff] [blame] | 1266 | |
Wedson Almeida Filho | 99d2d4c | 2019-02-14 12:53:46 +0000 | [diff] [blame] | 1267 | if (vcpu_handle_page_fault(vcpu, &info)) { |
| 1268 | return NULL; |
| 1269 | } |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1270 | |
| 1271 | if (is_el0_partition) { |
| 1272 | dlog_warning("Instruction abort on EL0 partition\n"); |
| 1273 | return api_abort(vcpu); |
| 1274 | } |
| 1275 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1276 | /* Inform the EL1 of the instruction abort. */ |
Fuad Tabba | c3847c7 | 2020-08-11 09:32:25 +0100 | [diff] [blame] | 1277 | inject_el1_instruction_abort_exception(vcpu, esr, far); |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 1278 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1279 | /* Schedule the same VM to continue running. */ |
| 1280 | return NULL; |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1281 | case EC_SVC: |
| 1282 | CHECK(is_el0_partition); |
| 1283 | return hvc_handler(vcpu); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1284 | case EC_HVC: |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1285 | if (is_el0_partition) { |
| 1286 | dlog_warning("Unexpected HVC Trap on EL0 partition\n"); |
| 1287 | return api_abort(vcpu); |
| 1288 | } |
Andrew Walbran | 59182d5 | 2019-09-23 17:55:39 +0100 | [diff] [blame] | 1289 | return hvc_handler(vcpu); |
| 1290 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1291 | case EC_SMC: { |
Andrew Scull | c960c03 | 2018-10-24 15:13:35 +0100 | [diff] [blame] | 1292 | uintreg_t smc_pc = vcpu->regs.pc; |
Andrew Walbran | 9dadaf2 | 2019-12-05 16:50:55 +0000 | [diff] [blame] | 1293 | struct vcpu *next = smc_handler(vcpu); |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 1294 | |
| 1295 | /* Skip the SMC instruction. */ |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1296 | vcpu->regs.pc = smc_pc + GET_NEXT_PC_INC(esr); |
Andrew Walbran | 9dadaf2 | 2019-12-05 16:50:55 +0000 | [diff] [blame] | 1297 | |
Andrew Walbran | 3364565 | 2019-04-15 12:29:31 +0100 | [diff] [blame] | 1298 | return next; |
Andrew Scull | c960c03 | 2018-10-24 15:13:35 +0100 | [diff] [blame] | 1299 | } |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 1300 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1301 | case EC_MSR: |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1302 | /* |
| 1303 | * NOTE: This should never be reached because it goes through a |
| 1304 | * separate path handled by handle_system_register_access(). |
| 1305 | */ |
| 1306 | panic("Handled by handle_system_register_access()."); |
| 1307 | |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1308 | default: |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1309 | dlog_notice( |
| 1310 | "Unknown lower sync exception pc=%#x, esr=%#x, " |
| 1311 | "ec=%#x\n", |
| 1312 | vcpu->regs.pc, esr, ec); |
Andrew Scull | 9726c25 | 2019-01-23 13:44:19 +0000 | [diff] [blame] | 1313 | break; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 1314 | } |
| 1315 | |
Raghu Krishnamurthy | b5775d2 | 2021-02-26 18:54:40 -0800 | [diff] [blame] | 1316 | if (is_el0_partition) { |
| 1317 | return api_abort(vcpu); |
| 1318 | } |
| 1319 | |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1320 | /* |
	 * The exception wasn't handled. Inject it into the VM to give it a
	 * chance to handle it as an unknown exception.
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1323 | */ |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1324 | inject_el1_unknown_exception(vcpu, esr); |
| 1325 | |
| 1326 | /* Schedule the same VM to continue running. */ |
| 1327 | return NULL; |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1328 | } |
| 1329 | |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1330 | /** |
Fuad Tabba | b0ef2a4 | 2019-12-19 11:19:25 +0000 | [diff] [blame] | 1331 | * Handles EC = 011000, MSR, MRS instruction traps. |
 * Injects an exception back into the vCPU if the access fails or is
 * unsupported; otherwise fulfils the access and skips the trapped
 * instruction.
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1333 | */ |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1334 | void handle_system_register_access(uintreg_t esr_el2) |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1335 | { |
| 1336 | struct vcpu *vcpu = current(); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1337 | ffa_vm_id_t vm_id = vcpu->vm->id; |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1338 | uintreg_t ec = GET_ESR_EC(esr_el2); |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1339 | |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1340 | CHECK(ec == EC_MSR); |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1341 | /* |
	 * Handle accesses to debug, performance monitor and feature ID
	 * registers.
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1343 | * Inject an exception for unhandled/unsupported registers. |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1344 | */ |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1345 | if (debug_el1_is_register_access(esr_el2)) { |
| 1346 | if (!debug_el1_process_access(vcpu, vm_id, esr_el2)) { |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1347 | inject_el1_sysreg_trap_exception(vcpu, esr_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1348 | return; |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1349 | } |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1350 | } else if (perfmon_is_register_access(esr_el2)) { |
| 1351 | if (!perfmon_process_access(vcpu, vm_id, esr_el2)) { |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1352 | inject_el1_sysreg_trap_exception(vcpu, esr_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1353 | return; |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1354 | } |
Fuad Tabba | 77a4b01 | 2019-11-15 12:13:08 +0000 | [diff] [blame] | 1355 | } else if (feature_id_is_register_access(esr_el2)) { |
| 1356 | if (!feature_id_process_access(vcpu, esr_el2)) { |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1357 | inject_el1_sysreg_trap_exception(vcpu, esr_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1358 | return; |
Fuad Tabba | 77a4b01 | 2019-11-15 12:13:08 +0000 | [diff] [blame] | 1359 | } |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1360 | } else { |
Olivier Deprez | da14ddc | 2022-08-11 14:14:41 +0200 | [diff] [blame] | 1361 | inject_el1_sysreg_trap_exception(vcpu, esr_el2); |
Fuad Tabba | b86325a | 2020-01-10 13:38:15 +0000 | [diff] [blame] | 1362 | return; |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1363 | } |
| 1364 | |
Fuad Tabba | f1d6dc5 | 2019-09-18 17:33:14 +0100 | [diff] [blame] | 1365 | /* Instruction was fulfilled. Skip it and run the next one. */ |
Fuad Tabba | 3e9b022 | 2019-11-11 16:47:50 +0000 | [diff] [blame] | 1366 | vcpu->regs.pc += GET_NEXT_PC_INC(esr_el2); |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 1367 | } |