/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdnoreturn.h>

#include "hf/arch/barriers.h"
#include "hf/arch/init.h"
#include "hf/arch/mm.h"

#include "hf/api.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/panic.h"
#include "hf/spci.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

#include "msr.h"
#include "psci.h"
#include "psci_handler.h"
#include "smc.h"

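/* HCR_EL2.VI bit: signals a pending virtual IRQ to the VM. */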
#define HCR_EL2_VI (1u << 7)

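/**
 * Result of handling an HVC: the value to return to the calling vCPU, and
 * optionally a new vCPU to switch to (NULL means keep running the caller).
 */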
struct hvc_handler_return {
        uintreg_t user_ret;
        struct vcpu *new;
};

/* Gets a reference to the currently executing vCPU. */
static struct vcpu *current(void)
{
        return (struct vcpu *)read_msr(tpidr_el2);
}

/**
 * Saves the state of per-vCPU peripherals, such as the virtual timer, and
 * informs the arch-independent sections that registers have been saved.
 */
void complete_saving_state(struct vcpu *vcpu)
{
        vcpu->regs.peripherals.cntv_cval_el0 = read_msr(cntv_cval_el0);
        vcpu->regs.peripherals.cntv_ctl_el0 = read_msr(cntv_ctl_el0);

        api_regs_state_saved(vcpu);

        /*
         * If switching away from the primary, copy the current EL0 virtual
         * timer registers to the corresponding EL2 physical timer registers.
         * This is used to emulate the virtual timer for the primary in case it
         * should fire while the secondary is running.
         */
        if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
                /*
                 * Clear timer control register before copying compare value,
                 * to avoid a spurious timer interrupt. This could be a problem
                 * if the interrupt is configured as edge-triggered, as it
                 * would then be latched in.
                 */
                write_msr(cnthp_ctl_el2, 0);
                write_msr(cnthp_cval_el2, read_msr(cntv_cval_el0));
                write_msr(cnthp_ctl_el2, read_msr(cntv_ctl_el0));
        }
}

/**
 * Restores the state of per-vCPU peripherals, such as the virtual timer.
 */
void begin_restoring_state(struct vcpu *vcpu)
{
        /*
         * Clear timer control register before restoring compare value, to
         * avoid a spurious timer interrupt. This could be a problem if the
         * interrupt is configured as edge-triggered, as it would then be
         * latched in.
         */
        write_msr(cntv_ctl_el0, 0);
        write_msr(cntv_cval_el0, vcpu->regs.peripherals.cntv_cval_el0);
        write_msr(cntv_ctl_el0, vcpu->regs.peripherals.cntv_ctl_el0);

        /*
         * If we are switching (back) to the primary, disable the EL2 physical
         * timer which was being used to emulate the EL0 virtual timer, as the
         * virtual timer is now running for the primary again.
         */
        if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
                write_msr(cnthp_ctl_el2, 0);
                write_msr(cnthp_cval_el2, 0);
        }
}

/**
 * Invalidates all stage 1 TLB entries on the current (physical) CPU for the
 * current VMID.
 */
static void invalidate_vm_tlb(void)
{
        /*
         * Ensure that the last VTTBR write has taken effect so we invalidate
         * the right set of TLB entries.
         */
        isb();

        __asm__ volatile("tlbi vmalle1");

        /*
         * Ensure that no instructions are fetched for the VM until after the
         * TLB invalidation has taken effect.
         */
        isb();

        /*
         * Ensure that no data reads or writes for the VM happen until after
         * the TLB invalidation has taken effect. Non-shareable is enough
         * because the TLB is local to the CPU.
         */
        dsb(nsh);
}

/**
 * Invalidates the TLB if a different vCPU is being run than the last vCPU of
 * the same VM which was run on the current pCPU.
 *
 * This is necessary because VMs may (contrary to the architecture
 * specification) use inconsistent ASIDs across vCPUs. Cf. KVM's similar
 * workaround:
 * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94d0e5980d6791b9
 */
void maybe_invalidate_tlb(struct vcpu *vcpu)
{
        size_t current_cpu_index = cpu_index(vcpu->cpu);
        spci_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);

        if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
            new_vcpu_index) {
                /*
                 * The vCPU has changed since the last time this VM was run on
                 * this pCPU, so we need to invalidate the TLB.
                 */
                invalidate_vm_tlb();

                /* Record the fact that this vCPU is now running on this CPU. */
                vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] =
                        new_vcpu_index;
        }
}

noreturn void irq_current_exception(uintreg_t elr, uintreg_t spsr)
{
        (void)elr;
        (void)spsr;

        panic("IRQ from current");
}

noreturn void fiq_current_exception(uintreg_t elr, uintreg_t spsr)
{
        (void)elr;
        (void)spsr;

        panic("FIQ from current");
}

noreturn void serr_current_exception(uintreg_t elr, uintreg_t spsr)
{
        (void)elr;
        (void)spsr;

        panic("SERR from current");
}

noreturn void sync_current_exception(uintreg_t elr, uintreg_t spsr)
{
        uintreg_t esr = read_msr(esr_el2);

        (void)spsr;

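        /* Dispatch on the exception class (ESR_EL2.EC, bits [31:26]). */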
        switch (esr >> 26) {
        case 0x25: /* EC = 100101, Data abort. */
                dlog("Data abort: pc=%#x, esr=%#x, ec=%#x", elr, esr,
                     esr >> 26);
                if (!(esr & (1u << 10))) { /* Check FnV bit. */
                        dlog(", far=%#x", read_msr(far_el2));
                } else {
                        dlog(", far=invalid");
                }

                dlog("\n");
                break;

        default:
                dlog("Unknown current sync exception pc=%#x, esr=%#x, "
                     "ec=%#x\n",
                     elr, esr, esr >> 26);
                break;
        }

        panic("EL2 exception");
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_interrupt(struct arch_regs *r, bool enable)
{
        if (enable) {
                r->lazy.hcr_el2 |= HCR_EL2_VI;
        } else {
                r->lazy.hcr_el2 &= ~HCR_EL2_VI;
        }
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register.
 */
static void set_virtual_interrupt_current(bool enable)
{
        uintreg_t hcr_el2 = read_msr(hcr_el2);

        if (enable) {
                hcr_el2 |= HCR_EL2_VI;
        } else {
                hcr_el2 &= ~HCR_EL2_VI;
        }
        write_msr(hcr_el2, hcr_el2);
}

static bool smc_check_client_privileges(const struct vcpu *vcpu)
{
        (void)vcpu; /* Unused. */

        /*
         * TODO(b/132421503): Check for privileges based on manifest.
         * Currently returns false, which maintains existing behavior.
         */

        return false;
}

/**
 * Applies SMC access control according to the VM's manifest.
 * Forwards the call if access is granted.
 * Returns true if the call was forwarded.
 */
static bool smc_forwarder(const struct vcpu *vcpu, smc_res_t *ret)
{
        uint32_t func = vcpu->regs.r[0];
        /* TODO(b/132421503): obtain vmid according to new scheme. */
        uint32_t client_id = vcpu->vm->id;

        if (smc_check_client_privileges(vcpu)) {
                *ret = smc_forward(func, vcpu->regs.r[1], vcpu->regs.r[2],
                                   vcpu->regs.r[3], vcpu->regs.r[4],
                                   vcpu->regs.r[5], vcpu->regs.r[6],
                                   client_id);
                return true;
        }

        return false;
}

/**
 * Processes SMC instruction calls.
 */
static bool smc_handler(struct vcpu *vcpu, smc_res_t *ret, struct vcpu **next)
{
        uint32_t func = vcpu->regs.r[0];

        if (psci_handler(vcpu, func, vcpu->regs.r[1], vcpu->regs.r[2],
                         vcpu->regs.r[3], &(ret->res0), next)) {
                /* SMC PSCI calls are processed by the PSCI handler. */
                return true;
        }

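        /*
         * Mask off the SMCCC calling-convention bit so that the 32-bit and
         * 64-bit forms of a function ID are handled by the same case.
         */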
        switch (func & ~SMCCC_CONVENTION_MASK) {
        case HF_DEBUG_LOG:
                api_debug_log(vcpu->regs.r[1], vcpu);
                return true;
        }

        /* Remaining SMC calls need to be forwarded. */
        return smc_forwarder(vcpu, ret);
}

struct hvc_handler_return hvc_handler(uintreg_t arg0, uintreg_t arg1,
                                      uintreg_t arg2, uintreg_t arg3)
{
        struct hvc_handler_return ret;

        ret.new = NULL;

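        /* PSCI calls made via HVC are dispatched to the PSCI handler first. */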
        if (psci_handler(current(), arg0, arg1, arg2, arg3, &ret.user_ret,
                         &ret.new)) {
                return ret;
        }

        switch ((uint32_t)arg0) {
        case SPCI_VERSION_32:
                ret.user_ret = api_spci_version();
                break;

        case HF_VM_GET_ID:
                ret.user_ret = api_vm_get_id(current());
                break;

        case HF_VM_GET_COUNT:
                ret.user_ret = api_vm_get_count();
                break;

        case HF_VCPU_GET_COUNT:
                ret.user_ret = api_vcpu_get_count(arg1, current());
                break;

        case HF_VCPU_RUN:
                ret.user_ret = hf_vcpu_run_return_encode(
                        api_vcpu_run(arg1, arg2, current(), &ret.new));
                break;

        case SPCI_YIELD_32:
                ret.user_ret = api_spci_yield(current(), &ret.new);
                break;

        case HF_VM_CONFIGURE:
                ret.user_ret = api_vm_configure(ipa_init(arg1), ipa_init(arg2),
                                                current(), &ret.new);
                break;

        case SPCI_MSG_SEND_32:
                ret.user_ret = api_spci_msg_send(arg1, current(), &ret.new);
                break;

        case SPCI_MSG_RECV_32:
                ret.user_ret = api_spci_msg_recv(arg1, current(), &ret.new);
                break;

        case HF_MAILBOX_CLEAR:
                ret.user_ret = api_mailbox_clear(current(), &ret.new);
                break;

        case HF_MAILBOX_WRITABLE_GET:
                ret.user_ret = api_mailbox_writable_get(current());
                break;

        case HF_MAILBOX_WAITER_GET:
                ret.user_ret = api_mailbox_waiter_get(arg1, current());
                break;

        case HF_INTERRUPT_ENABLE:
                ret.user_ret = api_interrupt_enable(arg1, arg2, current());
                break;

        case HF_INTERRUPT_GET:
                ret.user_ret = api_interrupt_get(current());
                break;

        case HF_INTERRUPT_INJECT:
                ret.user_ret = api_interrupt_inject(arg1, arg2, arg3, current(),
                                                    &ret.new);
                break;

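        /*
         * arg1 packs two 32-bit values; they are unpacked here into separate
         * arguments for api_share_memory().
         */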
        case HF_SHARE_MEMORY:
                ret.user_ret =
                        api_share_memory(arg1 >> 32, ipa_init(arg2), arg3,
                                         arg1 & 0xffffffff, current());
                break;

        case HF_DEBUG_LOG:
                ret.user_ret = api_debug_log(arg1, current());
                break;

        default:
                ret.user_ret = -1;
        }

        /* Set or clear VI bit. */
        if (ret.new == NULL) {
                /*
                 * Not switching vCPUs, set the bit for the current vCPU
                 * directly in the register.
                 */
                struct vcpu *vcpu = current();

                sl_lock(&vcpu->lock);
                set_virtual_interrupt_current(
                        vcpu->interrupts.enabled_and_pending_count > 0);
                sl_unlock(&vcpu->lock);
        } else {
                /*
                 * About to switch vCPUs, set the bit for the vCPU to which we
                 * are switching in the saved copy of the register.
                 */
                sl_lock(&ret.new->lock);
                set_virtual_interrupt(
                        &ret.new->regs,
                        ret.new->interrupts.enabled_and_pending_count > 0);
                sl_unlock(&ret.new->lock);
        }

        return ret;
}

struct vcpu *irq_lower(void)
{
        /*
         * Switch back to primary VM, interrupts will be handled there.
         *
         * If the VM has aborted, this vCPU will be aborted when the scheduler
         * tries to run it again. This means the interrupt will not be delayed
         * by the aborted VM.
         *
         * TODO: Only switch when the interrupt isn't for the current VM.
         */
        return api_preempt(current());
}

struct vcpu *fiq_lower(void)
{
        return irq_lower();
}

struct vcpu *serr_lower(void)
{
        dlog("SERR from lower\n");
        return api_abort(current());
}

/**
 * Initialises a fault info structure. It assumes that an FnV bit exists at
 * bit offset 10 of the ESR, and that it is only valid when the bottom 6 bits
 * of the ESR (the fault status code) are 010000; this is the case for both
 * instruction and data aborts, but not necessarily for other exception
 * reasons.
 */
static struct vcpu_fault_info fault_info_init(uintreg_t esr,
                                              const struct vcpu *vcpu,
                                              int mode)
{
        uint32_t fsc = esr & 0x3f;
        struct vcpu_fault_info r;

        r.mode = mode;
        r.pc = va_init(vcpu->regs.pc);

        /*
         * Check the FnV bit, which is only valid if dfsc/ifsc is 010000. It
         * indicates that we cannot rely on far_el2.
         */
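        /*
         * HPFAR_EL2 holds bits [47:12] of the faulting IPA at bits [39:4],
         * i.e. the page-aligned IPA shifted right by 8.
         */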
        if (fsc == 0x10 && esr & (1u << 10)) {
                r.vaddr = va_init(0);
                r.ipaddr = ipa_init(read_msr(hpfar_el2) << 8);
        } else {
                r.vaddr = va_init(read_msr(far_el2));
                r.ipaddr = ipa_init((read_msr(hpfar_el2) << 8) |
                                    (read_msr(far_el2) & (PAGE_SIZE - 1)));
        }

        return r;
}

struct vcpu *sync_lower_exception(uintreg_t esr)
{
        struct vcpu *vcpu = current();
        struct vcpu_fault_info info;
        struct vcpu *new_vcpu;

        switch (esr >> 26) {
        case 0x01: /* EC = 000001, WFI or WFE. */
                /* Skip the instruction. */
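                /* ESR_EL2.IL (bit 25): 1 = 32-bit instruction, 0 = 16-bit. */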
                vcpu->regs.pc += (esr & (1u << 25)) ? 4 : 2;
                /* Check TI bit of ISS, 0 = WFI, 1 = WFE. */
                if (esr & 1) {
                        /* WFE */
                        /*
                         * TODO: consider giving the scheduler more context,
                         * somehow.
                         */
                        api_spci_yield(vcpu, &new_vcpu);
                        return new_vcpu;
                }
                /* WFI */
                return api_wait_for_interrupt(vcpu);

        case 0x24: /* EC = 100100, Data abort. */
                info = fault_info_init(
                        esr, vcpu, (esr & (1u << 6)) ? MM_MODE_W : MM_MODE_R);
                if (vcpu_handle_page_fault(vcpu, &info)) {
                        return NULL;
                }
                break;

        case 0x20: /* EC = 100000, Instruction abort. */
                info = fault_info_init(esr, vcpu, MM_MODE_X);
                if (vcpu_handle_page_fault(vcpu, &info)) {
                        return NULL;
                }
                break;

        case 0x17: /* EC = 010111, SMC instruction. */ {
                uintreg_t smc_pc = vcpu->regs.pc;
                smc_res_t ret;
                struct vcpu *next = NULL;

                if (!smc_handler(vcpu, &ret, &next)) {
                        /* TODO(b/132421503): handle SMC forward rejection. */
                        dlog("Unsupported SMC call: %#x\n", vcpu->regs.r[0]);
                        ret.res0 = PSCI_ERROR_NOT_SUPPORTED;
                }

                /* Skip the SMC instruction. */
                vcpu->regs.pc = smc_pc + (esr & (1u << 25) ? 4 : 2);
                vcpu->regs.r[0] = ret.res0;
                vcpu->regs.r[1] = ret.res1;
                vcpu->regs.r[2] = ret.res2;
                vcpu->regs.r[3] = ret.res3;
                return next;
        }

        default:
                dlog("Unknown lower sync exception pc=%#x, esr=%#x, "
                     "ec=%#x\n",
                     vcpu->regs.pc, esr, esr >> 26);
                break;
        }

        /* The exception wasn't handled so abort the VM. */
        return api_abort(vcpu);
}