/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/plat/interrupts.h"

#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/notifications.h"
#include "hf/ffa/vm.h"
#include "hf/hf_ipi.h"
#include "hf/vm.h"
/**
 * This function has been deprecated and its contents moved into
 * api_interrupt_get in order to align the bitmap and queue for tracking
 * interrupts.
 * Returns 0 on success, or -1 otherwise.
 */
int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
				  struct vcpu *current)
{
	(void)pint_id;
	(void)vint_id;
	(void)current;
	return 0;
}

static struct vcpu *ffa_interrupts_find_target_vcpu_secure_interrupt(
	struct vcpu *current, uint32_t interrupt_id)
{
	/*
	 * Find which VM/SP owns this interrupt. We then find the
	 * corresponding vCPU context for this CPU.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
			struct interrupt_descriptor int_desc =
				vm->interrupt_desc[j];

			/*
			 * Interrupt descriptors are populated
			 * contiguously.
			 */
			if (!int_desc.valid) {
				break;
			}
			if (int_desc.interrupt_id == interrupt_id) {
				return api_ffa_get_vm_vcpu(vm, current);
			}
		}
	}

	return NULL;
}

static struct vcpu *ffa_interrupts_find_target_vcpu(struct vcpu *current,
						    uint32_t interrupt_id,
						    uint32_t *v_intid)
{
	struct vcpu *target_vcpu;

	assert(current != NULL);
	assert(v_intid != NULL);

	*v_intid = interrupt_id;

	switch (interrupt_id) {
	case SPURIOUS_INTID_OTHER_WORLD:
		/*
		 * Spurious interrupt ID indicating that there are no pending
		 * interrupts to acknowledge. For such scenarios, resume the
		 * current vCPU.
		 */
		target_vcpu = NULL;
		break;
	case HF_IPI_INTID:
		/*
		 * Get the next vCPU with a pending IPI. If all vCPUs have had
		 * their IPIs handled, this returns NULL.
		 */
		target_vcpu = hf_ipi_get_pending_target_vcpu(current);
		break;
	case ARM_SEL2_TIMER_PHYS_INT:
		/* Disable the S-EL2 physical timer. */
		host_timer_disable();
		target_vcpu = timer_find_target_vcpu(current);

		if (target_vcpu != NULL) {
			*v_intid = HF_VIRTUAL_TIMER_INTID;
		}
		/*
		 * It is possible for target_vcpu to be NULL in the case of a
		 * spurious timer interrupt.
		 */
		break;
	case ARM_EL1_VIRT_TIMER_PHYS_INT:
		/* Fall through. */
	case ARM_EL1_PHYS_TIMER_PHYS_INT:
		panic("Timer interrupt not expected to fire: %u\n",
		      interrupt_id);
	default:
		target_vcpu = ffa_interrupts_find_target_vcpu_secure_interrupt(
			current, interrupt_id);

		/* The target vCPU for a secure interrupt cannot be NULL. */
		CHECK(target_vcpu != NULL);
	}

	return target_vcpu;
}

/*
 * If the current vCPU is being preempted, record this in the target vCPU
 * and set the current vCPU's state to VCPU_STATE_PREEMPTED.
 */
static void ffa_interrupts_set_preempted_vcpu(
	struct vcpu_locked target_vcpu_locked,
	struct vcpu_locked current_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *preempted_vcpu = current_locked.vcpu;

	assert(target_vcpu != NULL);
	assert(preempted_vcpu != NULL);

	target_vcpu->preempted_vcpu = preempted_vcpu;
	preempted_vcpu->state = VCPU_STATE_PREEMPTED;
}

/**
 * If the interrupts were indeed masked by the SPMC before an SP's vCPU was
 * resumed, restore the priority mask, thereby allowing the interrupts to be
 * delivered.
 */
void ffa_interrupts_unmask(struct vcpu *current)
{
	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
}
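
/*
 * A minimal usage sketch (hypothetical caller, not a function in this file):
 * the SPMC pairs the mask/unmask operations around an SP vCPU's execution,
 * so the mask saved in `prev_interrupt_priority` by ffa_interrupts_mask()
 * below is exactly what ffa_interrupts_unmask() restores:
 *
 *	ffa_interrupts_mask(receiver_vcpu_locked); // save + tighten mask
 *	... run the SP's vCPU ...
 *	ffa_interrupts_unmask(receiver_vcpu);      // restore saved mask
 */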

/**
 * Enforce the action of an SP in response to a non-secure or other-secure
 * interrupt by changing the priority mask. Effectively, physical interrupts
 * shall not trigger, which has the same effect as queueing interrupts.
 */
void ffa_interrupts_mask(struct vcpu_locked receiver_vcpu_locked)
{
	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
	uint8_t current_priority;

	/* Save the current value of the priority mask. */
	current_priority = plat_interrupts_get_priority_mask();
	receiver_vcpu->prev_interrupt_priority = current_priority;

	if (receiver_vcpu->vm->other_s_interrupts_action ==
		    OTHER_S_INT_ACTION_QUEUED ||
	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
		/*
		 * If secure interrupts are not masked yet, mask them now. We
		 * could enter SPMC scheduled mode when an EL3 SPMD logical
		 * partition sends a direct request, and we are making the
		 * IMPDEF choice to mask interrupts when such a situation
		 * occurs. This keeps the design simple.
		 */
		if (current_priority > SWD_MASK_ALL_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
		}
	} else if (receiver_vcpu->vm->ns_interrupts_action ==
		   NS_ACTION_QUEUED) {
		/* If non-secure interrupts are not masked yet, mask them now. */
		if (current_priority > SWD_MASK_NS_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
		}
	}
}
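
/*
 * Note on the comparisons above (a sketch, assuming GICv3 priority
 * semantics): priorities are numerically inverted, i.e. a lower value means
 * a higher priority, and only interrupts with a priority value strictly
 * lower than the current mask are signaled to the PE. The
 * `current_priority > SWD_MASK_*` guards therefore only ever tighten the
 * mask (queueing more interrupts), never relax it:
 *
 *	mask = SWD_MASK_NS_INT;  -> non-secure interrupts are queued
 *	mask = SWD_MASK_ALL_INT; -> all interrupts are queued
 */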

static struct vcpu *interrupt_resume_waiting(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *next = NULL;
	struct ffa_value ret_interrupt = api_ffa_interrupt_return(v_intid);
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;

	/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
	vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
	ffa_interrupts_mask(target_vcpu_locked);

	if (target_vcpu_locked.vcpu->vm->el0_partition) {
		/*
		 * Since S-EL0 partitions will not receive the interrupt
		 * through a vIRQ signal in addition to the FFA_INTERRUPT
		 * ERET, make the interrupt no longer pending at this point.
		 */
		uint32_t pending_intid =
			vcpu_virt_interrupt_get_pending_and_enabled(
				target_vcpu_locked);
		assert(pending_intid == v_intid);
	}

	/*
	 * Ideally, we would mask non-secure interrupts here, since the spec
	 * mandates that the SPMC ensure an SPMC scheduled call chain cannot
	 * be preempted by a non-secure interrupt. However, our current
	 * design takes care of this implicitly.
	 */
	vcpu_set_running(target_vcpu_locked, &ret_interrupt);

	ffa_interrupts_set_preempted_vcpu(target_vcpu_locked, current_locked);

	next = target_vcpu;

	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		/*
		 * The target vCPU could have migrated to a different
		 * physical CPU. The SPMC migrates it to the current physical
		 * CPU and resumes it.
		 */
		assert(target_vcpu->vm->vcpu_count == 1);
		target_vcpu->cpu = current_locked.vcpu->cpu;
	}

	return next;
}

/**
 * Handles the secure interrupt according to the target vCPU's state.
 * Returns the next vCPU to resume accordingly.
 * If it returns NULL, the current vCPU shall be resumed. This might be
 * because the target vCPU is the current vCPU, or because the target vCPU
 * is not in a state in which it can be resumed to handle the secure
 * interrupt.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *next = NULL;

	/*
	 * If the target vCPU has migrated to a different physical CPU, it
	 * cannot be resumed on this CPU; the SPMC resumes the current vCPU
	 * instead.
	 */
	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		assert(target_vcpu->vm->vcpu_count == 1);
	}

	/* Secure interrupt signaling and queueing for the SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING:
		if (!target_vcpu->vm->sri_policy.intr_while_waiting) {
			next = interrupt_resume_waiting(
				current_locked, target_vcpu_locked, v_intid);
		} else {
			dlog_verbose(
				"%s: SP is waiting, SRI delayed due to "
				"interrupt. Partition %x, vcpu %x, interrupt "
				"%x\n",
				__func__, target_vcpu->vm->id,
				vcpu_index(target_vcpu), v_intid);
			ffa_notifications_sri_set_delayed(target_vcpu->cpu);
		}
		break;
	case VCPU_STATE_BLOCKED:
		if (!target_vcpu->vm->el0_partition &&
		    target_vcpu->cpu == current_locked.vcpu->cpu &&
		    ffa_direct_msg_precedes_in_call_chain(
			    current_locked, target_vcpu_locked)) {
			struct ffa_value ret_interrupt =
				api_ffa_interrupt_return(0);

			/*
			 * If the target vCPU ran earlier in the same call
			 * chain as the current vCPU, the SPMC leaves all
			 * intermediate execution contexts in the blocked
			 * state and resumes the target vCPU to handle the
			 * secure interrupt.
			 * Under the current design, there is only one
			 * possible scenario in which this could happen: both
			 * the preempted (i.e. current) and target vCPUs are
			 * in the same NWd scheduled call chain, as described
			 * in Scenario 1 of Table 8.4 in the EAC0 spec.
			 */
			assert(current_locked.vcpu->scheduling_mode ==
			       NWD_MODE);
			assert(target_vcpu->scheduling_mode == NWD_MODE);

			/*
			 * The execution preempted the call chain that
			 * involved the targeted and the current SPs.
			 * The targeted SP is set running, whilst the
			 * preempted SP is set to PREEMPTED.
			 */
			vcpu_set_running(target_vcpu_locked, &ret_interrupt);

			ffa_interrupts_set_preempted_vcpu(target_vcpu_locked,
							  current_locked);
			next = target_vcpu;
			break;
		}

		/*
		 * `next` is NULL. Either:
		 * - An EL0 partition cannot be resumed when in the blocked
		 *   state.
		 * - The target vCPU has migrated to a different physical
		 *   CPU, so it cannot be resumed on this CPU and the SPMC
		 *   resumes the current vCPU.
		 * - The target vCPU cannot be resumed now because it is in
		 *   the BLOCKED state (it yielded CPU cycles using
		 *   FFA_YIELD). The SPMC queues the virtual interrupt and
		 *   resumes the current vCPU, which could belong to either a
		 *   VM or an SP.
		 */
		break;
	case VCPU_STATE_PREEMPTED:
		/*
		 * We do not resume a target vCPU that has already been
		 * preempted by an interrupt. Make the vIRQ pending for the
		 * target SP (i.e. queue the interrupt) and continue to
		 * resume the current vCPU. Refer to section 8.3.2.1 bullet 3
		 * in the FF-A v1.1 EAC0 spec.
		 */
		if (!target_vcpu->vm->el0_partition &&
		    target_vcpu->cpu == current_locked.vcpu->cpu &&
		    current->vm->id == HF_OTHER_WORLD_ID) {
			/*
			 * The target vCPU must have been preempted by a
			 * non-secure interrupt. It could not have been
			 * preempted by a secure interrupt, as the current
			 * SPMC implementation does not allow secure
			 * interrupt prioritization. Moreover, the target
			 * vCPU should have been in Normal World scheduled
			 * mode, as an SPMC scheduled mode call chain cannot
			 * be preempted by a non-secure interrupt.
			 */
			CHECK(target_vcpu->scheduling_mode == NWD_MODE);
		}
		break;
	case VCPU_STATE_RUNNING:
		/* The interrupt has been injected into the vCPU's state. */
		break;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* WFI is a no-op for an SP. Fall through. */
	default:
		/*
		 * The vCPU of the target SP cannot be in the OFF/ABORTED
		 * state if it has to handle a secure interrupt.
		 */
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}
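
/*
 * Summary of the dispatch above, derived from the switch cases (the vCPU
 * resumed per target state):
 *
 *	WAITING     -> target (or current, with the SRI delayed, when the
 *	               SP's sri_policy says so)
 *	BLOCKED     -> target, but only if it precedes the current vCPU in
 *	               the same NWd scheduled call chain; else current
 *	PREEMPTED   -> current (the vIRQ stays queued for the target)
 *	RUNNING     -> current (the vIRQ was already injected)
 *	OFF/ABORTED -> panic: the interrupt cannot be signaled
 */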

/**
 * Obtain the physical interrupt that triggered from the interrupt
 * controller, and inject the corresponding virtual interrupt into the
 * target vCPU.
 * When a PE is executing in the Normal World and a secure interrupt
 * triggers, execution is trapped into EL3. The SPMD then routes the
 * interrupt to the SPMC through the FFA_INTERRUPT_32 ABI synchronously
 * using the eret conduit.
 */
void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
					    struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vcpu_locked target_vcpu_locked =
		(struct vcpu_locked){.vcpu = NULL};
	struct vcpu_locked current_locked;
	uint32_t intid;
	struct vm_locked target_vm_locked;
	uint32_t v_intid;

	/* Find the pending interrupt ID. This also activates the interrupt. */
	intid = plat_interrupts_get_pending_interrupt_id();
	v_intid = intid;

	/* Get the target vCPU and the virtual interrupt ID. */
	target_vcpu = ffa_interrupts_find_target_vcpu(current, intid, &v_intid);

	/*
	 * A spurious interrupt ID indicates there is no pending interrupt to
	 * acknowledge, so we do not need to signal end of interrupt.
	 */
	if (v_intid != SPURIOUS_INTID_OTHER_WORLD) {
		/*
		 * End the interrupt to drop the running priority. This also
		 * deactivates the physical interrupt. If not, the interrupt
		 * could trigger again after resuming the current vCPU.
		 */
		plat_interrupts_end_of_interrupt(intid);
	}

	if (target_vcpu == NULL) {
		/* No further handling required. Resume the current vCPU. */
		*next = NULL;
		return;
	}

	target_vm_locked = vm_lock(target_vcpu->vm);

	if (target_vcpu == current) {
		current_locked = vcpu_lock(current);
		target_vcpu_locked = current_locked;
	} else {
		struct two_vcpu_locked vcpus_locked;

		/* Lock both vCPUs at once to avoid deadlock. */
		vcpus_locked = vcpu_lock_both(current, target_vcpu);
		current_locked = vcpus_locked.vcpu1;
		target_vcpu_locked = vcpus_locked.vcpu2;
	}

	/*
	 * A race condition can occur with the execution contexts belonging
	 * to an MP SP. An interrupt targeting the execution context on the
	 * present core can trigger while the execution context of this SP on
	 * a different core is being aborted. In such a scenario, the
	 * physical interrupts belonging to the aborted SP are disabled and
	 * the current execution context is resumed.
	 */
	if (target_vcpu->state == VCPU_STATE_ABORTED ||
	    atomic_load_explicit(&target_vcpu->vm->aborting,
				 memory_order_relaxed)) {
		/* Clear fields corresponding to secure interrupt handling. */
		vcpu_secure_interrupt_complete(target_vcpu_locked);
		ffa_vm_disable_interrupts(target_vm_locked);

		/* Resume the current vCPU. */
		*next = NULL;
	} else {
		/* Set the interrupt pending in the target vCPU. */
		vcpu_virt_interrupt_inject(target_vcpu_locked, v_intid);

		switch (intid) {
		case HF_IPI_INTID:
			if (hf_ipi_handle(target_vcpu_locked)) {
				*next = NULL;
				break;
			}
			/*
			 * Fall through in the case handling has not been
			 * fully completed.
			 */
		default:
			/*
			 * Invoke the handler for partitions from either
			 * S-EL0 or S-EL1.
			 */
			*next = ffa_interrupts_signal_secure_interrupt(
				current_locked, target_vcpu_locked, v_intid);
		}
	}

	if (target_vcpu_locked.vcpu != NULL) {
		vcpu_unlock(&target_vcpu_locked);
	}

	vcpu_unlock(&current_locked);
	vm_unlock(&target_vm_locked);
}
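
/*
 * Overall flow of the handler above, as a sketch:
 *  1. Acknowledge the pending physical interrupt and map it to a virtual
 *     interrupt ID and a target vCPU.
 *  2. Signal end of interrupt (unless spurious) to drop the running
 *     priority and deactivate the physical interrupt.
 *  3. Lock the target VM and both vCPUs (both at once when distinct, to
 *     avoid deadlock).
 *  4. If the target SP is aborting, disable its interrupts and resume the
 *     current vCPU; otherwise inject the vIRQ and let
 *     ffa_interrupts_signal_secure_interrupt() pick the next vCPU.
 */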

bool ffa_interrupts_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vm_locked receiver_locked)
{
	struct vm *next_vm = target_locked.vcpu->vm;
	bool ret = false;

	/*
	 * Inject the NPI if:
	 * - The targeted VM ID is from this world (i.e. if it is an SP).
	 * - The partition has global pending notifications or there are
	 *   pending per-vCPU notifications in the next vCPU.
	 */
	if (vm_id_is_current_world(next_vm->id) &&
	    (vm_are_per_vcpu_notifications_pending(
		     receiver_locked, vcpu_index(target_locked.vcpu)) ||
	     vm_are_global_notifications_pending(receiver_locked))) {
		vcpu_virt_interrupt_inject(target_locked,
					   HF_NOTIFICATION_PENDING_INTID);
		ret = true;
	}

	return ret;
}
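
/*
 * A minimal usage sketch (hypothetical call site; the real callers live
 * elsewhere in the SPMC): with both locks already held, the NPI is injected
 * before handing execution to the target vCPU, e.g.:
 *
 *	if (ffa_interrupts_inject_notification_pending_interrupt(
 *		    target_locked, receiver_locked)) {
 *		// HF_NOTIFICATION_PENDING_INTID is now pending and will
 *		// be taken when the vCPU is resumed.
 *	}
 */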

struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
	struct vcpu *next;
	struct two_vcpu_locked both_vcpu_locked;

	/*
	 * The action specified by the SP in its manifest is ``Non-secure
	 * interrupt is signaled``. Refer to section 8.2.4 rules and
	 * guidelines bullet 4. Hence, the call chain starts unwinding. The
	 * current vCPU must have been a part of a NWd scheduled call chain.
	 * Therefore, it is preempted and execution is handed back either to
	 * the normal world or to the previous SP vCPU in the call chain
	 * through the FFA_INTERRUPT ABI. The api_preempt() call is
	 * equivalent to calling api_switch_to_other_world for the current
	 * vCPU passing FFA_INTERRUPT. The SP can be resumed later by
	 * FFA_RUN.
	 */
	CHECK(current_vcpu->scheduling_mode == NWD_MODE);
	assert(current_vcpu->call_chain.next_node == NULL);

	if (current_vcpu->call_chain.prev_node == NULL) {
		/* End of NWd scheduled call chain. */
		return api_preempt(current_vcpu);
	}

	next = current_vcpu->call_chain.prev_node;
	CHECK(next != NULL);

	/*
	 * Lock both vCPUs. Strictly speaking, this may not be necessary,
	 * since next is guaranteed to be in the BLOCKED state as it is the
	 * predecessor of the current vCPU in the present call chain.
	 */
	both_vcpu_locked = vcpu_lock_both(current_vcpu, next);

	/* Remove a node from the existing call chain. */
	current_vcpu->call_chain.prev_node = NULL;
	current_vcpu->state = VCPU_STATE_PREEMPTED;

	/*
	 * The SPMC applies the runtime model until the vCPU transitions from
	 * the running to the waiting state. Moreover, the SP continues to
	 * remain in its CPU cycle allocation mode. Hence, rt_model and
	 * scheduling_mode are not changed here.
	 */
	assert(next->state == VCPU_STATE_BLOCKED);
	assert(next->call_chain.next_node == current_vcpu);

	next->call_chain.next_node = NULL;

	vcpu_set_running(both_vcpu_locked.vcpu2,
			 &(struct ffa_value){
				 .func = FFA_INTERRUPT_32,
				 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
						     vcpu_index(current_vcpu)),
			 });

	sl_unlock(&next->lock);
	sl_unlock(&current_vcpu->lock);

	return next;
}
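
/*
 * Call chain unwinding, illustrated as a sketch: given a NWd scheduled
 * chain NWd -> SP1 -> SP2, with SP2 the current vCPU when a non-secure
 * interrupt is signaled, one invocation unwinds one node:
 *
 *	before: SP1 (BLOCKED) <-> SP2 (RUNNING)
 *	after:  SP1 (RUNNING, resumed with FFA_INTERRUPT), SP2 (PREEMPTED)
 *
 * When the head of the chain is reached (prev_node == NULL), api_preempt()
 * hands execution back to the normal world instead.
 */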

static void ffa_interrupts_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;

	if (ffa_vm_managed_exit_supported(vm)) {
		vcpu_virt_interrupt_enable(current_locked,
					   HF_MANAGED_EXIT_INTID, true);
		/*
		 * The SPMC decides the interrupt type for the managed exit
		 * signal based on the partition manifest.
		 */
		if (vm->me_signal_virq) {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_IRQ);
		} else {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_FIQ);
		}
	}

	if (vm->notifications.enabled) {
		vcpu_virt_interrupt_enable(current_locked,
					   HF_NOTIFICATION_PENDING_INTID,
					   true);
	}
}

/**
 * Enable the relevant virtual interrupts for Secure Partitions.
 * For all SPs, any applicable virtual maintenance interrupts are enabled.
 * Additionally, for S-EL0 partitions, all the interrupts declared in the
 * partition manifest are enabled at the virtual interrupt controller
 * interface early during the boot stage, as an S-EL0 SP need not call the
 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
 */
void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
					      struct vm_locked vm_locked)
{
	struct vcpu *current;
	struct vm *vm;

	current = current_locked.vcpu;
	vm = current->vm;
	assert(vm == vm_locked.vm);

	if (vm->el0_partition) {
		for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
			struct interrupt_descriptor int_desc;

			int_desc = vm_locked.vm->interrupt_desc[k];

			/* Interrupt descriptors are populated contiguously. */
			if (!int_desc.valid) {
				break;
			}
			vcpu_virt_interrupt_enable(current_locked,
						   int_desc.interrupt_id,
						   true);
		}
	}

	ffa_interrupts_enable_virtual_maintenance_interrupts(current_locked);
}

/**
 * Reconfigure an interrupt belonging to the current partition at runtime.
 * At present, this paravirtualized interface only allows the following
 * commands, which signify what change is being requested by the current
 * partition:
 * - Change the target CPU of the interrupt.
 * - Change the security state of the interrupt.
 * - Enable or disable the physical interrupt.
 */
int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
				   uint32_t value, struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	int64_t ret = -1;
	struct interrupt_descriptor *int_desc = NULL;

	/*
	 * Lock the VM to protect the interrupt descriptor from being
	 * modified concurrently.
	 */
	vm_locked = vm_lock(vm);

	switch (command) {
	case INT_RECONFIGURE_TARGET_PE:
		/* Here, value represents the target PE index. */
		if (value >= MAX_CPUS) {
			dlog_verbose(
				"Illegal target PE index specified while "
				"reconfiguring interrupt %x\n",
				int_id);
			goto out_unlock;
		}

		/*
		 * A UP SP cannot reconfigure an interrupt to be targeted at
		 * any physical CPU other than the one it is currently
		 * running on.
		 */
		if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
			dlog_verbose(
				"Illegal target PE index specified by current "
				"UP SP\n");
			goto out_unlock;
		}

		/* Configure the interrupt to be routed to a specific CPU. */
		int_desc = vm_interrupt_set_target_mpidr(
			vm_locked, int_id, cpu_find_index(value)->id);
		break;
	case INT_RECONFIGURE_SEC_STATE:
		/* Specify the new security state of the interrupt. */
		if (value != INT_DESC_SEC_STATE_NS &&
		    value != INT_DESC_SEC_STATE_S) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
		break;
	case INT_RECONFIGURE_ENABLE:
		/* Enable or disable the interrupt. */
		if (value != INT_DISABLE && value != INT_ENABLE) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_enable(vm_locked, int_id,
						   value == INT_ENABLE);
		break;
	default:
		dlog_verbose("Interrupt reconfigure: Unsupported command %x\n",
			     command);
		goto out_unlock;
	}

	/* Check if the interrupt belongs to the current SP. */
	if (int_desc == NULL) {
		dlog_verbose("Interrupt %x does not belong to current SP\n",
			     int_id);
		goto out_unlock;
	}

	ret = 0;
	plat_interrupts_reconfigure_interrupt(*int_desc);

out_unlock:
	vm_unlock(&vm_locked);

	return ret;
}
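
/*
 * A minimal usage sketch (hypothetical interrupt ID and target PE index;
 * the paravirtualized ABI that dispatches to this function is not shown):
 * retarget interrupt 0x20 to PE index 1, then enable it:
 *
 *	if (ffa_interrupts_reconfigure(0x20, INT_RECONFIGURE_TARGET_PE, 1,
 *				       current) != 0) {
 *		dlog_verbose("Failed to retarget interrupt\n");
 *	}
 *	ffa_interrupts_reconfigure(0x20, INT_RECONFIGURE_ENABLE, INT_ENABLE,
 *				   current);
 */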

/**
 * Run the vCPU in SPMC schedule mode under the runtime model for secure
 * interrupt handling.
 */
static void ffa_interrupts_run_in_sec_interrupt_rtm(
	struct vcpu_locked target_vcpu_locked)
{
	struct vcpu *target_vcpu;

	target_vcpu = target_vcpu_locked.vcpu;

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;
	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
	target_vcpu->state = VCPU_STATE_RUNNING;
}

bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
				   struct vcpu_locked next_locked,
				   struct ffa_value *signal_interrupt)
{
	/*
	 * Since S-EL0 partitions will not receive the interrupt through a
	 * vIRQ signal in addition to the FFA_INTERRUPT ERET, make the
	 * interrupt no longer pending at this point. Otherwise, keep it
	 * pending for when the S-EL1 partition calls hf_interrupt_get.
	 */
	uint32_t intid = current_locked.vcpu->vm->el0_partition
				 ? vcpu_virt_interrupt_get_pending_and_enabled(
					   current_locked)
				 : vcpu_virt_interrupt_peek_pending_and_enabled(
					   current_locked);

	/*
	 * Check if there are any pending virtual secure interrupts to be
	 * handled.
	 */
	if (intid != HF_INVALID_INTID) {
		/*
		 * Prepare to signal the virtual secure interrupt to the
		 * S-EL0/S-EL1 SP in the WAITING state. Refer to FF-A v1.2
		 * Table 9.1 and Table 9.2 case 1.
		 */
		*signal_interrupt = api_ffa_interrupt_return(intid);

		/*
		 * Prepare to resume this partition's vCPU in SPMC schedule
		 * mode to handle the virtual secure interrupt.
		 */
		ffa_interrupts_run_in_sec_interrupt_rtm(current_locked);

		current_locked.vcpu->preempted_vcpu = next_locked.vcpu;
		next_locked.vcpu->state = VCPU_STATE_PREEMPTED;

		dlog_verbose(
			"%s: Pending interrupt %d, intercepting FF-A call.\n",
			__func__, intid);

		return true;
	}

	return false;
}
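
/*
 * Interception summary, derived from the logic above: when an FF-A call
 * would hand control to `next` but the current SP still has a pending and
 * enabled virtual interrupt, the call is intercepted. The current vCPU is
 * re-entered in SPMC schedule mode with an FFA_INTERRUPT return carrying
 * the interrupt ID, and `next` is parked in the PREEMPTED state until the
 * interrupt has been handled.
 */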