/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/plat/interrupts.h"

#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/notifications.h"
#include "hf/ffa/vm.h"
#include "hf/hf_ipi.h"
#include "hf/vm.h"

/**
 * This function has been deprecated and its contents moved into
 * api_interrupt_get in order to align the bitmap and queue for tracking
 * interrupts.
 * Returns 0 on success, or -1 otherwise.
 */
int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
				  struct vcpu *current)
{
	(void)pint_id;
	(void)vint_id;
	(void)current;
	return 0;
}

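/**
 * Walk every VM's interrupt descriptors to find the partition that owns
 * the given physical interrupt, and return that partition's vCPU context
 * on the current physical CPU. Returns NULL if no partition claims the
 * interrupt.
 */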
static struct vcpu *ffa_interrupts_find_target_vcpu_secure_interrupt(
	struct vcpu *current, uint32_t interrupt_id)
{
	/*
	 * Find which VM/SP owns this interrupt. We then find the
	 * corresponding vCPU context for this CPU.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
			struct interrupt_descriptor int_desc =
				vm->interrupt_desc[j];

			/*
			 * Interrupt descriptors are populated
			 * contiguously.
			 */
			if (!int_desc.valid) {
				break;
			}
			if (int_desc.interrupt_id == interrupt_id) {
				return api_ffa_get_vm_vcpu(vm, current);
			}
		}
	}

	return NULL;
}

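/**
 * Map a physical interrupt ID to the vCPU that should handle it and
 * report, via v_intid, the virtual interrupt ID to inject (these differ
 * for the S-EL2 physical timer). Special IDs (spurious, IPI, timers) are
 * handled explicitly; any other ID is resolved through the partitions'
 * interrupt descriptors.
 */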
static struct vcpu *ffa_interrupts_find_target_vcpu(struct vcpu *current,
						    uint32_t interrupt_id,
						    uint32_t *v_intid)
{
	struct vcpu *target_vcpu;

	assert(current != NULL);
	assert(v_intid != NULL);

	*v_intid = interrupt_id;

	switch (interrupt_id) {
	case SPURIOUS_INTID_OTHER_WORLD:
		/*
		 * Spurious interrupt ID indicating that there are no pending
		 * interrupts to acknowledge. For such scenarios, resume the
		 * current vCPU.
		 */
		target_vcpu = NULL;
		break;
	case HF_IPI_INTID:
		/*
		 * Get the next vCPU with a pending IPI. If all vCPUs
		 * have had their IPIs handled this will return NULL.
		 */
		target_vcpu = hf_ipi_get_pending_target_vcpu(current);
		break;
	case ARM_SEL2_TIMER_PHYS_INT:
		/* Disable the S-EL2 physical timer. */
		host_timer_disable();
		target_vcpu = timer_find_target_vcpu(current);

		if (target_vcpu != NULL) {
			*v_intid = HF_VIRTUAL_TIMER_INTID;
		}
		/*
		 * It is possible for target_vcpu to be NULL in case of a
		 * spurious timer interrupt.
		 */
		break;
	case ARM_EL1_VIRT_TIMER_PHYS_INT:
		/* Fall through. */
	case ARM_EL1_PHYS_TIMER_PHYS_INT:
		panic("Timer interrupt not expected to fire: %u\n",
		      interrupt_id);
	default:
		target_vcpu = ffa_interrupts_find_target_vcpu_secure_interrupt(
			current, interrupt_id);

		/* The target vCPU for a secure interrupt cannot be NULL. */
		CHECK(target_vcpu != NULL);
	}

	return target_vcpu;
}

/*
 * If the current vCPU is being preempted, record this in the target vCPU
 * and set the current vCPU's state to VCPU_STATE_PREEMPTED.
 */
static void ffa_interrupts_set_preempted_vcpu(
	struct vcpu_locked target_vcpu_locked,
	struct vcpu_locked current_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *preempted_vcpu = current_locked.vcpu;

	assert(target_vcpu != NULL);
	assert(preempted_vcpu != NULL);

	target_vcpu->preempted_vcpu = preempted_vcpu;
	preempted_vcpu->state = VCPU_STATE_PREEMPTED;
}

/**
 * If interrupts were masked by the SPMC before an SP's vCPU was resumed,
 * restore the priority mask, thereby allowing the interrupts to be delivered.
 */
void ffa_interrupts_unmask(struct vcpu *current)
{
	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
}

/**
 * Enforce the action of an SP in response to a non-secure or other-secure
 * interrupt by changing the priority mask. Effectively, physical interrupts
 * shall not trigger, which has the same effect as queueing them.
 */
void ffa_interrupts_mask(struct vcpu_locked receiver_vcpu_locked)
{
	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
	uint8_t current_priority;

	/* Save current value of priority mask. */
	current_priority = plat_interrupts_get_priority_mask();
	receiver_vcpu->prev_interrupt_priority = current_priority;

	if (receiver_vcpu->vm->other_s_interrupts_action ==
		    OTHER_S_INT_ACTION_QUEUED ||
	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
		/*
		 * If secure interrupts are not masked yet, mask them now. We
		 * could enter SPMC scheduled mode when an EL3 SPMD Logical
		 * partition sends a direct request, and we are making the
		 * IMPDEF choice to mask interrupts when such a situation
		 * occurs. This keeps the design simple.
		 */
		if (current_priority > SWD_MASK_ALL_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
		}
	} else if (receiver_vcpu->vm->ns_interrupts_action ==
		   NS_ACTION_QUEUED) {
		/* If non-secure interrupts are not masked yet, mask them. */
		if (current_priority > SWD_MASK_NS_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
		}
	}
}

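/*
 * A note on the masking model above, assuming GICv3-style priorities
 * (lower numeric value means higher priority): ffa_interrupts_mask()
 * lowers the priority mask to SWD_MASK_NS_INT (filtering non-secure
 * interrupts) or SWD_MASK_ALL_INT (also filtering other secure
 * interrupts), after stashing the old mask in prev_interrupt_priority;
 * ffa_interrupts_unmask() restores it, at which point any queued
 * interrupts can be delivered.
 */

/**
 * Resume a target vCPU in WAITING state so that it can handle a pending
 * virtual interrupt: enter the secure interrupt runtime model, mask
 * physical interrupts according to the SP's configured actions, and
 * record the current vCPU as the one that was preempted.
 */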
static struct vcpu *interrupt_resume_waiting(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *next = NULL;
	struct ffa_value ret_interrupt = api_ffa_interrupt_return(v_intid);
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;

	/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
	vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
	ffa_interrupts_mask(target_vcpu_locked);

	if (target_vcpu_locked.vcpu->vm->el0_partition) {
		/*
		 * Since S-EL0 partitions will not receive the interrupt
		 * through a vIRQ signal in addition to the FFA_INTERRUPT
		 * ERET, make the interrupt no longer pending at this point.
		 */
		uint32_t pending_intid =
			vcpu_virt_interrupt_get_pending_and_enabled(
				target_vcpu_locked);
		assert(pending_intid == v_intid);
	}

	/*
	 * Ideally, we would have to mask non-secure interrupts here, since
	 * the spec mandates that the SPMC should make sure an SPMC scheduled
	 * call chain cannot be preempted by a non-secure interrupt. However,
	 * our current design takes care of it implicitly.
	 */
	vcpu_set_running(target_vcpu_locked, &ret_interrupt);

	ffa_interrupts_set_preempted_vcpu(target_vcpu_locked, current_locked);

	next = target_vcpu;

	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		/*
		 * The target vCPU could have migrated to a different
		 * physical CPU. The SPMC will migrate it to the current
		 * physical CPU and resume it.
		 */
		assert(target_vcpu->vm->vcpu_count == 1);
		target_vcpu->cpu = current_locked.vcpu->cpu;
	}

	return next;
}

/**
 * Handles the secure interrupt according to the target vCPU's state.
 * Returns the next vCPU to resume accordingly.
 * If it returns NULL, the current vCPU shall be resumed.
 * This might be because the target vCPU is the current vCPU, or because
 * the target vCPU is not in a state in which it can be resumed to handle
 * the secure interrupt.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *next = NULL;

	/*
	 * If the target vCPU has migrated to a different physical CPU, it
	 * cannot be resumed on this CPU; the SPMC resumes the current vCPU
	 * instead. Only a UP SP's vCPU can be in this situation, hence the
	 * assert.
	 */
	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		assert(target_vcpu->vm->vcpu_count == 1);
	}

	/* Secure interrupt signaling and queuing for the SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING:
		if (!target_vcpu->vm->sri_policy.intr_while_waiting) {
			next = interrupt_resume_waiting(
				current_locked, target_vcpu_locked, v_intid);
		} else {
			dlog_verbose(
				"%s: SP is waiting, SRI delayed due to "
				"interrupt. Partition %x, vcpu %x, interrupt "
				"%x\n",
				__func__, target_vcpu->vm->id,
				vcpu_index(target_vcpu), v_intid);
			ffa_notifications_sri_set_delayed(target_vcpu->cpu);
		}
		break;
	case VCPU_STATE_BLOCKED:
		if (!target_vcpu->vm->el0_partition &&
		    target_vcpu->cpu == current_locked.vcpu->cpu &&
		    ffa_direct_msg_precedes_in_call_chain(current_locked,
							  target_vcpu_locked)) {
			struct ffa_value ret_interrupt =
				api_ffa_interrupt_return(0);

			/*
			 * If the target vCPU ran earlier in the same call
			 * chain as the current vCPU, the SPMC leaves all
			 * intermediate execution contexts in blocked state
			 * and resumes the target vCPU for handling the secure
			 * interrupt.
			 * Under the current design, there is only one possible
			 * scenario in which this could happen: both the
			 * preempted (i.e. current) and target vCPU are in the
			 * same NWd scheduled call chain, as described in
			 * Scenario 1 of Table 8.4 in the EAC0 spec.
			 */
			assert(current_locked.vcpu->scheduling_mode ==
			       NWD_MODE);
			assert(target_vcpu->scheduling_mode == NWD_MODE);

			/*
			 * The execution preempted the call chain that involved
			 * the targeted and the current SPs.
			 * The targeted SP is set running, whilst the
			 * preempted SP is set PREEMPTED.
			 */
			vcpu_set_running(target_vcpu_locked, &ret_interrupt);

			ffa_interrupts_set_preempted_vcpu(target_vcpu_locked,
							  current_locked);
			next = target_vcpu;
			break;
		}

		/*
		 * `next` is NULL.
		 * Either:
		 * - An EL0 partition can't be resumed when in blocked state.
		 * - The target vCPU has migrated to a different physical CPU.
		 *   Hence, it cannot be resumed on this CPU; the SPMC resumes
		 *   the current vCPU.
		 * - The target vCPU cannot be resumed now because it is in
		 *   BLOCKED state (it yielded CPU cycles using FFA_YIELD).
		 *   The SPMC queues the virtual interrupt and resumes the
		 *   current vCPU, which could belong to either a VM or an SP.
		 */
		break;
	case VCPU_STATE_PREEMPTED:
		/*
		 * We do not resume a target vCPU that has already been
		 * preempted by an interrupt. Make the vIRQ pending for the
		 * target SP (i.e., queue the interrupt) and continue to
		 * resume the current vCPU. Refer to section 8.3.2.1 bullet
		 * 3 in the FF-A v1.1 EAC0 spec.
		 */
		if (!target_vcpu->vm->el0_partition &&
		    target_vcpu->cpu == current_locked.vcpu->cpu &&
		    current->vm->id == HF_OTHER_WORLD_ID) {
			/*
			 * The target vCPU must have been preempted by a
			 * non-secure interrupt. It could not have been
			 * preempted by a secure interrupt, as the current
			 * SPMC implementation does not allow secure interrupt
			 * prioritization. Moreover, the target vCPU should
			 * have been in Normal World scheduled mode, as an
			 * SPMC scheduled mode call chain cannot be preempted
			 * by a non-secure interrupt.
			 */
			CHECK(target_vcpu->scheduling_mode == NWD_MODE);
		}
		break;
	case VCPU_STATE_RUNNING:
		/*
		 * The interrupt has already been injected into the vCPU's
		 * state, so there is nothing further to do.
		 */
		break;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* WFI is a no-op for an SP. Fall through. */
	default:
		/*
		 * The vCPU of the target SP cannot be in OFF/ABORTED state
		 * if it has to handle a secure interrupt.
		 */
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Obtain the physical interrupt that triggered from the interrupt controller,
 * and inject the corresponding virtual interrupt into the target vCPU.
 * When a PE is executing in the Normal World and a secure interrupt triggers,
 * execution is trapped into EL3. The SPMD then routes the interrupt to the
 * SPMC through the FFA_INTERRUPT_32 ABI synchronously, using the ERET
 * conduit.
 */
void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
					    struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vcpu_locked target_vcpu_locked =
		(struct vcpu_locked){.vcpu = NULL};
	struct vcpu_locked current_locked;
	uint32_t intid;
	struct vm_locked target_vm_locked;
	uint32_t v_intid;

	/* Find the pending interrupt ID. This also activates the interrupt. */
	intid = plat_interrupts_get_pending_interrupt_id();
	v_intid = intid;

	/* Get the target vCPU and the virtual interrupt ID. */
	target_vcpu = ffa_interrupts_find_target_vcpu(current, intid, &v_intid);

	/*
	 * A spurious interrupt ID indicates there is no pending interrupt to
	 * acknowledge, so we do not need to call end of interrupt.
	 */
	if (v_intid != SPURIOUS_INTID_OTHER_WORLD) {
		/*
		 * End the interrupt to drop the running priority. This also
		 * deactivates the physical interrupt. Otherwise, the
		 * interrupt could trigger again after resuming the current
		 * vCPU.
		 */
		plat_interrupts_end_of_interrupt(intid);
	}

	if (target_vcpu == NULL) {
		/* No further handling required. Resume the current vCPU. */
		*next = NULL;
		return;
	}

	target_vm_locked = vm_lock(target_vcpu->vm);

	if (target_vcpu == current) {
		current_locked = vcpu_lock(current);
		target_vcpu_locked = current_locked;
	} else {
		struct two_vcpu_locked vcpus_locked;
		/* Lock both vCPUs at once to avoid deadlock. */
		vcpus_locked = vcpu_lock_both(current, target_vcpu);
		current_locked = vcpus_locked.vcpu1;
		target_vcpu_locked = vcpus_locked.vcpu2;
	}

	/*
	 * A race condition can occur with the execution contexts belonging to
	 * an MP SP. An interrupt targeting the execution context on the
	 * present core can trigger while the execution context of this SP on
	 * a different core is being aborted. In such a scenario, the physical
	 * interrupts belonging to the aborted SP are disabled and the current
	 * execution context is resumed.
	 */
	if (target_vcpu->state == VCPU_STATE_ABORTED ||
	    atomic_load_explicit(&target_vcpu->vm->aborting,
				 memory_order_relaxed)) {
		/* Clear fields corresponding to secure interrupt handling. */
		vcpu_secure_interrupt_complete(target_vcpu_locked);
		ffa_vm_disable_interrupts(target_vm_locked);

		/* Resume the current vCPU. */
		*next = NULL;
	} else {
		/* Set the interrupt pending in the target vCPU. */
		vcpu_virt_interrupt_inject(target_vcpu_locked, v_intid);

		switch (intid) {
		case HF_IPI_INTID:
			if (hf_ipi_handle(target_vcpu_locked)) {
				*next = NULL;
				break;
			}
			/*
			 * Fall through in case handling has not been fully
			 * completed.
			 */
		default:
			/*
			 * Invoke the handler for partitions from either
			 * S-EL0 or S-EL1.
			 */
			*next = ffa_interrupts_signal_secure_interrupt(
				current_locked, target_vcpu_locked, v_intid);
		}
	}

	if (target_vcpu_locked.vcpu != NULL) {
		vcpu_unlock(&target_vcpu_locked);
	}

	vcpu_unlock(&current_locked);
	vm_unlock(&target_vm_locked);
}

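/**
 * Inject the Notification Pending Interrupt (NPI) into the target vCPU if
 * the receiver partition is an SP with pending notifications (global, or
 * per-vCPU for the target vCPU). Returns true if the NPI was injected.
 */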
bool ffa_interrupts_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vm_locked receiver_locked)
{
	struct vm *next_vm = target_locked.vcpu->vm;
	bool ret = false;

	/*
	 * Inject the NPI if:
	 * - The targeted VM ID is from this world (i.e. if it is an SP).
	 * - The partition has global pending notifications or there are
	 *   pending per-vCPU notifications in the next vCPU.
	 */
	if (vm_id_is_current_world(next_vm->id) &&
	    (vm_are_per_vcpu_notifications_pending(
		     receiver_locked, vcpu_index(target_locked.vcpu)) ||
	     vm_are_global_notifications_pending(receiver_locked))) {
		vcpu_virt_interrupt_inject(target_locked,
					   HF_NOTIFICATION_PENDING_INTID);
		ret = true;
	}

	return ret;
}

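/**
 * Unwind the NWd scheduled call chain when the current SP is preempted by
 * a non-secure interrupt: hand execution back to the predecessor vCPU in
 * the chain, or to the normal world if there is no predecessor.
 */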
struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
	struct vcpu *next;
	struct two_vcpu_locked both_vcpu_locked;

	/*
	 * The action specified by the SP in its manifest is "Non-secure
	 * interrupt is signaled". Refer to section 8.2.4 rules and guidelines
	 * bullet 4. Hence, the call chain starts unwinding. The current vCPU
	 * must have been a part of a NWd scheduled call chain. Therefore, it
	 * is preempted and execution is either handed back to the normal
	 * world or to the previous SP vCPU in the call chain through the
	 * FFA_INTERRUPT ABI. The api_preempt() call is equivalent to calling
	 * api_switch_to_other_world for the current vCPU passing
	 * FFA_INTERRUPT. The SP can be resumed later by FFA_RUN.
	 */
	CHECK(current_vcpu->scheduling_mode == NWD_MODE);
	assert(current_vcpu->call_chain.next_node == NULL);

	if (current_vcpu->call_chain.prev_node == NULL) {
		/* End of the NWd scheduled call chain. */
		return api_preempt(current_vcpu);
	}

	next = current_vcpu->call_chain.prev_node;
	CHECK(next != NULL);

	/*
	 * Lock both vCPUs. Strictly speaking, it may not be necessary since
	 * next is guaranteed to be in BLOCKED state as it is the predecessor
	 * of the current vCPU in the present call chain.
	 */
	both_vcpu_locked = vcpu_lock_both(current_vcpu, next);

	/* Remove a node from the existing call chain. */
	current_vcpu->call_chain.prev_node = NULL;
	current_vcpu->state = VCPU_STATE_PREEMPTED;

	/*
	 * The SPMC applies the runtime model until the vCPU transitions from
	 * running to waiting state. Moreover, the SP continues to remain in
	 * its CPU cycle allocation mode. Hence, rt_model and scheduling_mode
	 * are not changed here.
	 */
	assert(next->state == VCPU_STATE_BLOCKED);
	assert(next->call_chain.next_node == current_vcpu);

	next->call_chain.next_node = NULL;

	vcpu_set_running(both_vcpu_locked.vcpu2,
			 &(struct ffa_value){
				 .func = FFA_INTERRUPT_32,
				 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
						     vcpu_index(current_vcpu)),
			 });

	sl_unlock(&next->lock);
	sl_unlock(&current_vcpu->lock);

	return next;
}

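/**
 * Enable the virtual maintenance interrupts for the current vCPU: the
 * managed exit interrupt (typed as vIRQ or vFIQ according to the partition
 * manifest) and, if notifications are enabled, the notification pending
 * interrupt.
 */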
static void ffa_interrupts_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;

	if (ffa_vm_managed_exit_supported(vm)) {
		vcpu_virt_interrupt_enable(current_locked,
					   HF_MANAGED_EXIT_INTID, true);
		/*
		 * The SPMC decides the interrupt type for the managed exit
		 * signal based on the partition manifest.
		 */
		if (vm->me_signal_virq) {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_IRQ);
		} else {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_FIQ);
		}
	}

	if (vm->notifications.enabled) {
		vcpu_virt_interrupt_enable(current_locked,
					   HF_NOTIFICATION_PENDING_INTID, true);
	}
}

/**
 * Enable relevant virtual interrupts for Secure Partitions.
 * For all SPs, any applicable virtual maintenance interrupts are enabled.
 * Additionally, for S-EL0 partitions, all the interrupts declared in the
 * partition manifest are enabled at the virtual interrupt controller
 * interface early during the boot stage, as an S-EL0 SP need not call the
 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
 */
void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
					      struct vm_locked vm_locked)
{
	struct vcpu *current;
	struct vm *vm;

	current = current_locked.vcpu;
	vm = current->vm;
	assert(vm == vm_locked.vm);

	if (vm->el0_partition) {
		for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
			struct interrupt_descriptor int_desc;

			int_desc = vm_locked.vm->interrupt_desc[k];

			/* Interrupt descriptors are populated contiguously. */
			if (!int_desc.valid) {
				break;
			}
			vcpu_virt_interrupt_enable(current_locked,
						   int_desc.interrupt_id, true);
		}
	}

	ffa_interrupts_enable_virtual_maintenance_interrupts(current_locked);
}

/**
 * Reconfigure the interrupt belonging to the current partition at runtime.
 * At present, this paravirtualized interface only allows the following
 * commands, which signify what change is being requested by the current
 * partition:
 * - Change the target CPU of the interrupt.
 * - Change the security state of the interrupt.
 * - Enable or disable the physical interrupt.
 */
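/*
 * Illustrative flow (a sketch; the SP-side paravirtualized call that lands
 * here is not shown and its name is assumed): a UP SP running on CPU index
 * 2 that wants interrupt int_id routed to that CPU would reach this handler
 * as
 *
 *	ffa_interrupts_reconfigure(int_id, INT_RECONFIGURE_TARGET_PE, 2,
 *				   current);
 *
 * The request is rejected if the index is >= MAX_CPUS, or if a UP SP names
 * any CPU other than the one it is currently running on.
 */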
int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
				   uint32_t value, struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	int64_t ret = -1;
	struct interrupt_descriptor *int_desc = NULL;

	/*
	 * Lock the VM to protect the interrupt descriptor from being modified
	 * concurrently.
	 */
	vm_locked = vm_lock(vm);

	switch (command) {
	case INT_RECONFIGURE_TARGET_PE:
		/* Here, value represents the target PE index. */
		if (value >= MAX_CPUS) {
			dlog_verbose(
				"Illegal target PE index specified while "
				"reconfiguring interrupt %x\n",
				int_id);
			goto out_unlock;
		}

		/*
		 * A UP SP cannot reconfigure an interrupt to be targeted at
		 * any other physical CPU except the one it is currently
		 * running on.
		 */
		if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
			dlog_verbose(
				"Illegal target PE index specified by current "
				"UP SP\n");
			goto out_unlock;
		}

		/* Configure the interrupt to be routed to a specific CPU. */
		int_desc = vm_interrupt_set_target_mpidr(
			vm_locked, int_id, cpu_find_index(value)->id);
		break;
	case INT_RECONFIGURE_SEC_STATE:
		/* Specify the new security state of the interrupt. */
		if (value != INT_DESC_SEC_STATE_NS &&
		    value != INT_DESC_SEC_STATE_S) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
		break;
	case INT_RECONFIGURE_ENABLE:
		/* Enable or disable the interrupt. */
		if (value != INT_DISABLE && value != INT_ENABLE) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		} else {
			int_desc = vm_interrupt_set_enable(vm_locked, int_id,
							   value == INT_ENABLE);
		}
		break;
	default:
		dlog_verbose("Interrupt reconfigure: Unsupported command %x\n",
			     command);
		goto out_unlock;
	}

	/* Check if the interrupt belongs to the current SP. */
	if (int_desc == NULL) {
		dlog_verbose("Interrupt %x does not belong to current SP\n",
			     int_id);
		goto out_unlock;
	}

	ret = 0;
	plat_interrupts_reconfigure_interrupt(*int_desc);

out_unlock:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Run the vCPU in SPMC schedule mode under the runtime model for secure
 * interrupt handling.
 */
static void ffa_interrupts_run_in_sec_interrupt_rtm(
	struct vcpu_locked target_vcpu_locked)
{
	struct vcpu *target_vcpu;

	target_vcpu = target_vcpu_locked.vcpu;

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;
	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
	target_vcpu->state = VCPU_STATE_RUNNING;
}

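/**
 * Intercept an FF-A call that would leave the current vCPU waiting while
 * virtual interrupts are pending. On interception, fills signal_interrupt
 * with an FFA_INTERRUPT value carrying the pending interrupt ID and returns
 * true; returns false when nothing is pending, or when the SP's SRI policy
 * defers handling to a Schedule Receiver Interrupt.
 */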
bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
				   struct vcpu_locked next_locked,
				   struct ffa_value *signal_interrupt)
{
	uint32_t intid;
	struct vm *current_vm = current_locked.vcpu->vm;

	/* No pending interrupts, so no need to intercept or trigger an SRI. */
	if (vcpu_virt_interrupt_count_get(current_locked) == 0U) {
		return false;
	}

	/*
	 * Do not intercept the call.
	 * Let the partition go into waiting state as planned.
	 * Pend the SRI on the next world switch to the NWd.
	 */
	if (current_vm->sri_policy.intr_pending_entry_wait) {
		dlog_verbose(
			"Partition entry to wait. Interrupts pending. Send "
			"SRI.\n");
		ffa_notifications_sri_set_delayed(current_locked.vcpu->cpu);
		return false;
	}

	/*
	 * Since S-EL0 partitions will not receive the interrupt through a vIRQ
	 * signal in addition to the FFA_INTERRUPT ERET, make the interrupt no
	 * longer pending at this point. Otherwise, keep it pending for when
	 * the S-EL1 partition calls hf_interrupt_get.
	 */
	intid = current_locked.vcpu->vm->el0_partition
			? vcpu_virt_interrupt_get_pending_and_enabled(
				  current_locked)
			: vcpu_virt_interrupt_peek_pending_and_enabled(
				  current_locked);

	/*
	 * At this point there are interrupts pending, and the partition
	 * didn't configure an SRI policy for handling interrupts.
	 *
	 * Prepare to signal the virtual secure interrupt to the S-EL0/S-EL1
	 * SP in WAITING state. Refer to FF-A v1.2 Table 9.1 and Table 9.2
	 * case 1.
	 */
	*signal_interrupt = api_ffa_interrupt_return(intid);

	/*
	 * Prepare to resume this partition's vCPU in SPMC schedule mode to
	 * handle the virtual secure interrupt.
	 */
	ffa_interrupts_run_in_sec_interrupt_rtm(current_locked);

	current_locked.vcpu->preempted_vcpu = next_locked.vcpu;
	next_locked.vcpu->state = VCPU_STATE_PREEMPTED;

	dlog_verbose("%s: Pending interrupt %d, intercepting FF-A call.\n",
		     __func__, intid);

	return true;
}
Karl Meakin8d245542025-01-31 13:19:25 +0000811}