 1/*
2 * Copyright 2024 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9#include "hf/plat/interrupts.h"
10
11#include "hf/arch/gicv3.h"
12#include "hf/arch/host_timer.h"
 13
14#include "hf/api.h"
15#include "hf/check.h"
 16#include "hf/ffa/vm.h"
 17#include "hf/hf_ipi.h"
18#include "hf/vm.h"
19
20/**
 21 * Drops the current interrupt priority and deactivates the given interrupt ID
22 * for the calling vCPU.
23 *
24 * Returns 0 on success, or -1 otherwise.
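 *
 * Note: the current implementation assumes a 1:1 mapping between virtual and
 * physical interrupt IDs, so callers are expected to pass the same value for
 * pint_id and vint_id.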
25 */
 26int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
27 struct vcpu *current)
 28{
29 struct vcpu_locked current_locked;
30 uint32_t int_id;
31 int ret = 0;
32
33 current_locked = vcpu_lock(current);
34 if (vint_id >= HF_NUM_INTIDS) {
35 ret = -1;
36 goto out;
37 }
38
39 /*
 40 * The current implementation maps virtual interrupts 1:1 to physical interrupts.
41 */
42 if (pint_id != vint_id) {
43 ret = -1;
44 goto out;
45 }
46
47 /*
 48 * A malicious SP could try to deactivate an interrupt that does not
 49 * belong to it. Return an error to indicate failure.
50 */
51 if (!vcpu_interrupt_queue_peek(current_locked, &int_id)) {
52 dlog_error("No virtual interrupt to be deactivated\n");
53 ret = -1;
54 goto out;
55 }
56
57 if (int_id != vint_id) {
58 dlog_error("Unknown interrupt being deactivated %u\n", vint_id);
59 ret = -1;
60 goto out;
61 }
62
63 if (current->requires_deactivate_call) {
64 /* There is no preempted vCPU to resume. */
65 assert(current->preempted_vcpu == NULL);
66
67 vcpu_secure_interrupt_complete(current_locked);
68 }
69
70 /*
71 * Now that the virtual interrupt has been serviced and deactivated,
72 * remove it from the queue, if it was pending.
73 */
74 vcpu_interrupt_queue_pop(current_locked, &int_id);
75 assert(vint_id == int_id);
76out:
77 vcpu_unlock(&current_locked);
78 return ret;
79}
80
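/**
 * Walk the interrupt descriptors of every partition to find the VM/SP that
 * owns the given physical interrupt ID, and return that partition's vCPU
 * corresponding to the current CPU. Returns NULL if no partition claims the
 * interrupt.
 */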
81static struct vcpu *plat_ffa_find_target_vcpu_secure_interrupt(
82 struct vcpu *current, uint32_t interrupt_id)
83{
84 /*
85 * Find which VM/SP owns this interrupt. We then find the
86 * corresponding vCPU context for this CPU.
87 */
88 for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
89 struct vm *vm = vm_find_index(index);
90
91 for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
92 struct interrupt_descriptor int_desc =
93 vm->interrupt_desc[j];
94
95 /*
96 * Interrupt descriptors are populated
97 * contiguously.
98 */
99 if (!int_desc.valid) {
100 break;
101 }
102 if (int_desc.interrupt_id == interrupt_id) {
103 return api_ffa_get_vm_vcpu(vm, current);
104 }
105 }
106 }
107
108 return NULL;
109}
110
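/**
 * Resolve the target vCPU for a physical interrupt: IPIs are routed to the
 * vCPU with a pending IPI on this CPU, EL1 timer interrupts are not expected
 * here, and any other interrupt is mapped to its owning partition via the
 * interrupt descriptors. The result must not be NULL.
 */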
111static struct vcpu *plat_ffa_find_target_vcpu(struct vcpu *current,
112 uint32_t interrupt_id)
113{
114 struct vcpu *target_vcpu;
115
116 switch (interrupt_id) {
117 case HF_IPI_INTID:
118 target_vcpu = hf_ipi_get_pending_target_vcpu(current->cpu);
119 break;
120 case ARM_EL1_VIRT_TIMER_PHYS_INT:
121 /* Fall through */
122 case ARM_EL1_PHYS_TIMER_PHYS_INT:
123 panic("Timer interrupt not expected to fire: %u\n",
124 interrupt_id);
125 default:
126 target_vcpu = plat_ffa_find_target_vcpu_secure_interrupt(
127 current, interrupt_id);
128 }
129
130 /* The target vCPU for a secure interrupt cannot be NULL. */
131 CHECK(target_vcpu != NULL);
132
133 return target_vcpu;
134}
135
136/*
 137 * Queue the pending virtual interrupt for the target vCPU. The fields
 138 * tracking secure interrupt processing are set accordingly.
139 */
140static void plat_ffa_queue_vint(struct vcpu_locked target_vcpu_locked,
141 uint32_t vint_id,
142 struct vcpu_locked current_locked)
143{
144 struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
145 struct vcpu *preempted_vcpu = current_locked.vcpu;
146
147 if (preempted_vcpu != NULL) {
148 target_vcpu->preempted_vcpu = preempted_vcpu;
149 preempted_vcpu->state = VCPU_STATE_PREEMPTED;
150 }
151
 152	/* Queue the pending virtual interrupt for the target vCPU. */
153 if (!vcpu_interrupt_queue_push(target_vcpu_locked, vint_id)) {
154 panic("Exhausted interrupt queue for vcpu of SP: %x\n",
155 target_vcpu->vm->id);
156 }
157}
158
159/**
 160 * Enforce the action of an SP in response to a non-secure or other-secure
 161 * interrupt by changing the priority mask. Effectively, physical interrupts
 162 * do not trigger, which has the same effect as queueing them.
163 */
164static void plat_ffa_vcpu_queue_interrupts(
165 struct vcpu_locked receiver_vcpu_locked)
166{
167 struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
168 uint8_t current_priority;
169
170 /* Save current value of priority mask. */
171 current_priority = plat_interrupts_get_priority_mask();
172 receiver_vcpu->prev_interrupt_priority = current_priority;
173
174 if (receiver_vcpu->vm->other_s_interrupts_action ==
175 OTHER_S_INT_ACTION_QUEUED ||
176 receiver_vcpu->scheduling_mode == SPMC_MODE) {
177 /*
 178 * If secure interrupts are not masked yet, mask them now. We could
 179 * enter SPMC scheduled mode when an EL3 SPMD Logical partition
 180 * sends a direct request, and we are making the IMPDEF choice
 181 * to mask interrupts when such a situation occurs. This keeps the
 182 * design simple.
183 */
184 if (current_priority > SWD_MASK_ALL_INT) {
185 plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
186 }
187 } else if (receiver_vcpu->vm->ns_interrupts_action ==
188 NS_ACTION_QUEUED) {
 189 /* If non-secure interrupts are not masked yet, mask them now. */
190 if (current_priority > SWD_MASK_NS_INT) {
191 plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
192 }
193 }
194}
195
196/**
197 * Handles the secure interrupt according to the target vCPU's state
 198 * when the owner of the interrupt is an S-EL0 partition.
199 */
200static struct vcpu *plat_ffa_signal_secure_interrupt_sel0(
201 struct vcpu_locked current_locked,
202 struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
203{
204 struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
205 struct vcpu *next;
206
207 /* Secure interrupt signaling and queuing for S-EL0 SP. */
208 switch (target_vcpu->state) {
209 case VCPU_STATE_WAITING:
210 if (target_vcpu->cpu == current_locked.vcpu->cpu) {
211 struct ffa_value ret_interrupt =
212 api_ffa_interrupt_return(v_intid);
213
214 /* FF-A v1.1 EAC0 Table 8.1 case 1 and Table 12.10. */
215 dlog_verbose("S-EL0: Secure interrupt signaled: %x\n",
216 target_vcpu->vm->id);
217
218 vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
219 plat_ffa_vcpu_queue_interrupts(target_vcpu_locked);
220
221 vcpu_set_running(target_vcpu_locked, &ret_interrupt);
222
223 /*
 224 * If execution was in the NWd as well, the vCPU is likewise
 225 * set to the preempted state.
226 */
227 plat_ffa_queue_vint(target_vcpu_locked, v_intid,
228 current_locked);
229
230 /* Switch to target vCPU responsible for this interrupt.
231 */
232 next = target_vcpu;
233 } else {
234 dlog_verbose("S-EL0: Secure interrupt queued: %x\n",
235 target_vcpu->vm->id);
236 /*
 237 * The target vCPU has migrated to a different physical
 238 * CPU. Hence, it cannot be resumed on this CPU; the SPMC
 239 * resumes the current vCPU instead.
240 */
241 next = NULL;
242 plat_ffa_queue_vint(target_vcpu_locked, v_intid,
243 (struct vcpu_locked){.vcpu = NULL});
244 }
245 break;
246 case VCPU_STATE_BLOCKED:
247 case VCPU_STATE_PREEMPTED:
248 case VCPU_STATE_RUNNING:
249 dlog_verbose("S-EL0: Secure interrupt queued: %x\n",
250 target_vcpu->vm->id);
251 /*
 252 * The target vCPU cannot be resumed; the SPMC resumes the
 253 * current vCPU.
254 */
255 next = NULL;
256 plat_ffa_queue_vint(target_vcpu_locked, v_intid,
257 (struct vcpu_locked){.vcpu = NULL});
258 break;
259 default:
260 panic("Secure interrupt cannot be signaled to target SP\n");
261 break;
262 }
263
264 return next;
265}
266
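/**
 * Walk the call chain backwards from the current vCPU and return true if the
 * target vCPU is one of its predecessors, or false once the start of the call
 * chain is reached.
 */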
267static bool is_predecessor_in_call_chain(struct vcpu_locked current_locked,
268 struct vcpu_locked target_locked)
269{
270 struct vcpu *prev_node;
271 struct vcpu *current = current_locked.vcpu;
272 struct vcpu *target = target_locked.vcpu;
273
274 assert(current != NULL);
275 assert(target != NULL);
276
277 prev_node = current->call_chain.prev_node;
278
279 while (prev_node != NULL) {
280 if (prev_node == target) {
281 return true;
282 }
283
 284		/* The target vCPU is not its immediate predecessor. */
285 prev_node = prev_node->call_chain.prev_node;
286 }
287
288 /* Search terminated. Reached start of call chain. */
289 return false;
290}
291
292/**
293 * Handles the secure interrupt according to the target vCPU's state
 294 * when the owner of the interrupt is an S-EL1 partition.
295 */
296static struct vcpu *plat_ffa_signal_secure_interrupt_sel1(
297 struct vcpu_locked current_locked,
298 struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
299{
300 struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
301 struct vcpu *current = current_locked.vcpu;
302 struct vcpu *next = NULL;
303
304 /* Secure interrupt signaling and queuing for S-EL1 SP. */
305 switch (target_vcpu->state) {
306 case VCPU_STATE_WAITING:
307 if (target_vcpu->cpu == current_locked.vcpu->cpu) {
308 struct ffa_value ret_interrupt =
309 api_ffa_interrupt_return(v_intid);
310
311 /* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
312 vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
313 plat_ffa_vcpu_queue_interrupts(target_vcpu_locked);
314
315 /*
 316 * Ideally, we would have to mask non-secure interrupts here,
 317 * since the spec mandates that the SPMC must ensure an
 318 * SPMC scheduled call chain cannot be preempted by a
 319 * non-secure interrupt. However, our current design
 320 * takes care of this implicitly.
321 */
322 vcpu_set_running(target_vcpu_locked, &ret_interrupt);
323
324 plat_ffa_queue_vint(target_vcpu_locked, v_intid,
325 current_locked);
326 next = target_vcpu;
327 } else {
328 /*
 329 * The target vCPU has migrated to a different physical
 330 * CPU. Hence, it cannot be resumed on this CPU; the SPMC
 331 * resumes the current vCPU instead.
332 */
333 assert(target_vcpu->vm->vcpu_count == 1);
334 dlog_verbose("S-EL1: Secure interrupt queued: %x\n",
335 target_vcpu->vm->id);
336 next = NULL;
337 plat_ffa_queue_vint(target_vcpu_locked, v_intid,
338 (struct vcpu_locked){.vcpu = NULL});
339 }
340 break;
341 case VCPU_STATE_BLOCKED:
342 if (target_vcpu->cpu != current_locked.vcpu->cpu) {
343 /*
 344 * The target vCPU has migrated to a different physical
 345 * CPU. Hence, it cannot be resumed on this CPU; the SPMC
 346 * resumes the current vCPU instead.
347 */
348 assert(target_vcpu->vm->vcpu_count == 1);
349 next = NULL;
350 plat_ffa_queue_vint(target_vcpu_locked, v_intid,
351 (struct vcpu_locked){.vcpu = NULL});
352 } else if (is_predecessor_in_call_chain(current_locked,
353 target_vcpu_locked)) {
354 struct ffa_value ret_interrupt =
355 api_ffa_interrupt_return(0);
356
357 /*
 358 * If the target vCPU ran earlier in the same call
 359 * chain as the current vCPU, the SPMC leaves all
 360 * intermediate execution contexts in the blocked state
 361 * and resumes the target vCPU to handle the secure
 362 * interrupt.
 363 * Under the current design, there is only one possible
 364 * scenario in which this could happen: both the
 365 * preempted (i.e. current) and target vCPUs are in the
 366 * same NWd scheduled call chain, as described in
 367 * Scenario 1 of Table 8.4 of the EAC0 spec.
368 */
369 assert(current_locked.vcpu->scheduling_mode ==
370 NWD_MODE);
371 assert(target_vcpu->scheduling_mode == NWD_MODE);
372
373 /*
 374 * The interrupt preempted a call chain that involves both
 375 * the target and the current SPs.
 376 * The target SP is set running, whilst the preempted SP
 377 * is set to PREEMPTED.
378 */
379 vcpu_set_running(target_vcpu_locked, &ret_interrupt);
380
381 plat_ffa_queue_vint(target_vcpu_locked, v_intid,
382 current_locked);
383
384 next = target_vcpu;
385 } else {
386 /*
387 * The target vCPU cannot be resumed now because it is
 388 * in the BLOCKED state (it yielded CPU cycles using
 389 * FFA_YIELD). The SPMC queues the virtual interrupt and
 390 * resumes the current vCPU, which could belong to either
 391 * a VM or an SP.
392 */
393 next = NULL;
394 plat_ffa_queue_vint(target_vcpu_locked, v_intid,
395 (struct vcpu_locked){.vcpu = NULL});
396 }
397 break;
398 case VCPU_STATE_PREEMPTED:
399 if (target_vcpu->cpu == current_locked.vcpu->cpu) {
400 /*
 401 * We do not resume a target vCPU that has already been
 402 * preempted by an interrupt. Make the vIRQ pending for the
 403 * target SP (i.e., queue the interrupt) and continue to
 404 * resume the current vCPU. Refer to section 8.3.2.1 bullet
 405 * 3 in the FF-A v1.1 EAC0 spec.
406 */
407
408 if (current->vm->id == HF_OTHER_WORLD_ID) {
409 /*
 410 * The target vCPU must have been preempted by a
 411 * non-secure interrupt. It could not have been
 412 * preempted by a secure interrupt, as the current
 413 * SPMC implementation does not allow secure
 414 * interrupt prioritization. Moreover, the
 415 * target vCPU should have been in Normal World
 416 * scheduled mode, as an SPMC scheduled mode call
 417 * chain cannot be preempted by a non-secure
 418 * interrupt.
419 */
420 CHECK(target_vcpu->scheduling_mode == NWD_MODE);
421 }
422 } else {
423 /*
 424 * The target vCPU has migrated to a different physical
 425 * CPU. Hence, it cannot be resumed on this CPU; the SPMC
 426 * resumes the current vCPU instead.
427 */
428 assert(target_vcpu->vm->vcpu_count == 1);
429 }
430
431 next = NULL;
432 plat_ffa_queue_vint(target_vcpu_locked, v_intid,
433 (struct vcpu_locked){.vcpu = NULL});
434
435 break;
436 case VCPU_STATE_RUNNING:
437 if (current == target_vcpu) {
438 /*
 439 * This is the special scenario where the currently
 440 * running execution context also happens to be the
 441 * target of the secure interrupt. In this case, it
 442 * needs to signal completion of the secure interrupt
 443 * implicitly. Refer to the embedded comment in the
 444 * vcpu.h file for the description of this variable.
445 */
446
447 current->requires_deactivate_call = true;
448 } else {
449 /*
 450 * The target vCPU has migrated to a different physical
 451 * CPU. Hence, it cannot be resumed on this CPU; the SPMC
 452 * resumes the current vCPU instead.
453 */
454 assert(target_vcpu->vm->vcpu_count == 1);
455 }
456 next = NULL;
457 plat_ffa_queue_vint(target_vcpu_locked, v_intid,
458 (struct vcpu_locked){.vcpu = NULL});
459 break;
460 case VCPU_STATE_BLOCKED_INTERRUPT:
 461	/* WFI is a no-op for an SP. Fall through. */
462 default:
463 /*
 464 * A vCPU of the target SP cannot be in the OFF/ABORTED state if it has
 465 * to handle a secure interrupt.
466 */
467 panic("Secure interrupt cannot be signaled to target SP\n");
468 break;
469 }
470
471 return next;
472}
473
474/**
 475 * Obtain the physical interrupt that triggered from the interrupt controller,
 476 * and inject the corresponding virtual interrupt into the target vCPU.
 477 * When a PE is executing in the Normal World and a secure interrupt triggers,
 478 * execution is trapped into EL3. The SPMD then routes the interrupt to the
 479 * SPMC synchronously through the FFA_INTERRUPT_32 ABI, using the ERET conduit.
480 */
 481void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
482 struct vcpu **next)
 483{
484 struct vcpu *target_vcpu;
485 struct vcpu_locked target_vcpu_locked =
486 (struct vcpu_locked){.vcpu = NULL};
487 struct vcpu_locked current_locked;
488 uint32_t intid;
489 struct vm_locked target_vm_locked;
490 uint32_t v_intid;
491
492 /* Find pending interrupt id. This also activates the interrupt. */
493 intid = plat_interrupts_get_pending_interrupt_id();
494 v_intid = intid;
495
496 switch (intid) {
497 case ARM_SEL2_TIMER_PHYS_INT:
498 /* Disable the S-EL2 physical timer */
499 host_timer_disable();
500 target_vcpu = timer_find_target_vcpu(current);
501
502 if (target_vcpu != NULL) {
503 v_intid = HF_VIRTUAL_TIMER_INTID;
504 break;
505 }
506 /*
 507 * It is possible for target_vcpu to be NULL in case of a spurious
508 * timer interrupt. Fall through.
509 */
510 case SPURIOUS_INTID_OTHER_WORLD:
511 /*
512 * Spurious interrupt ID indicating that there are no pending
513 * interrupts to acknowledge. For such scenarios, resume the
514 * current vCPU.
515 */
516 *next = NULL;
517 return;
518 default:
519 target_vcpu = plat_ffa_find_target_vcpu(current, intid);
520 break;
521 }
522
523 /*
524 * End the interrupt to drop the running priority. It also deactivates
525 * the physical interrupt. If not, the interrupt could trigger again
 526 * after resuming the current vCPU.
527 */
528 plat_interrupts_end_of_interrupt(intid);
529
530 target_vm_locked = vm_lock(target_vcpu->vm);
531
532 if (target_vcpu == current) {
533 current_locked = vcpu_lock(current);
534 target_vcpu_locked = current_locked;
535 } else {
536 struct two_vcpu_locked vcpus_locked;
537 /* Lock both vCPUs at once to avoid deadlock. */
538 vcpus_locked = vcpu_lock_both(current, target_vcpu);
539 current_locked = vcpus_locked.vcpu1;
540 target_vcpu_locked = vcpus_locked.vcpu2;
541 }
542
543 /*
544 * A race condition can occur with the execution contexts belonging to
 545 * an MP SP. An interrupt targeting the execution context on the present
 546 * core can trigger while the execution context of this SP on a
 547 * different core is being aborted. In such a scenario, the physical
 548 * interrupts belonging to the aborted SP are disabled and the current
 549 * execution context is resumed.
550 */
551 if (target_vcpu->state == VCPU_STATE_ABORTED ||
552 atomic_load_explicit(&target_vcpu->vm->aborting,
553 memory_order_relaxed)) {
554 /* Clear fields corresponding to secure interrupt handling. */
555 vcpu_secure_interrupt_complete(target_vcpu_locked);
 556		ffa_vm_disable_interrupts(target_vm_locked);
 557
558 /* Resume current vCPU. */
559 *next = NULL;
560 } else {
561 /*
 562 * The SPMC has started handling a secure interrupt with a clean
 563 * slate. This flag should be false unless there is a bug in the
 564 * source code. Hence, use assert rather than CHECK.
565 */
566 assert(!target_vcpu->requires_deactivate_call);
567
568 /* Set the interrupt pending in the target vCPU. */
569 vcpu_interrupt_inject(target_vcpu_locked, v_intid);
570
571 switch (intid) {
572 case HF_IPI_INTID:
573 if (hf_ipi_handle(target_vcpu_locked)) {
574 *next = NULL;
575 break;
576 }
577 /*
 578 * Fall through in the case that handling has not been fully
579 * completed.
580 */
581 default:
582 /*
583 * Either invoke the handler related to partitions from
584 * S-EL0 or from S-EL1.
585 */
586 *next = target_vcpu_locked.vcpu->vm->el0_partition
587 ? plat_ffa_signal_secure_interrupt_sel0(
588 current_locked,
589 target_vcpu_locked, v_intid)
590 : plat_ffa_signal_secure_interrupt_sel1(
591 current_locked,
592 target_vcpu_locked, v_intid);
593 }
594 }
595
596 if (target_vcpu_locked.vcpu != NULL) {
597 vcpu_unlock(&target_vcpu_locked);
598 }
599
600 vcpu_unlock(&current_locked);
601 vm_unlock(&target_vm_locked);
602}
603
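/**
 * Inject the Notification Pending Interrupt (NPI) into the target vCPU if the
 * receiver is an SP with pending per-vCPU notifications for that vCPU, or with
 * global pending notifications for which an NPI has not been injected yet.
 * Returns true if the NPI was injected.
 */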
 604bool ffa_interrupts_inject_notification_pending_interrupt(
 605	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
606 struct vm_locked receiver_locked)
607{
608 struct vm *next_vm = target_locked.vcpu->vm;
609 bool ret = false;
610
611 /*
612 * Inject the NPI if:
613 * - The targeted VM ID is from this world (i.e. if it is an SP).
614 * - The partition has global pending notifications and an NPI hasn't
615 * been injected yet.
616 * - There are pending per-vCPU notifications in the next vCPU.
617 */
618 if (vm_id_is_current_world(next_vm->id) &&
619 (vm_are_per_vcpu_notifications_pending(
620 receiver_locked, vcpu_index(target_locked.vcpu)) ||
621 (vm_are_global_notifications_pending(receiver_locked) &&
622 !vm_notifications_is_npi_injected(receiver_locked)))) {
623 api_interrupt_inject_locked(target_locked,
624 HF_NOTIFICATION_PENDING_INTID,
625 current_locked, NULL);
626 vm_notifications_set_npi_injected(receiver_locked, true);
627 ret = true;
628 }
629
630 return ret;
631}
632
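/**
 * Unwind the NWd scheduled call chain when a non-secure interrupt preempts the
 * current vCPU: either hand control back to the normal world, or resume the
 * previous SP vCPU in the call chain via FFA_INTERRUPT. Returns the vCPU to
 * run next.
 */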
 633struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
 634{
635 struct vcpu *next;
636 struct two_vcpu_locked both_vcpu_locked;
637
638 /*
 639 * The action specified by the SP in its manifest is ``Non-secure
 640 * interrupt is signaled``. Refer to section 8.2.4 rules and guidelines,
 641 * bullet 4. Hence, the call chain starts unwinding. The current vCPU
 642 * must have been part of an NWd scheduled call chain. Therefore, it is
 643 * preempted and execution is handed back either to the normal world or
 644 * to the previous SP vCPU in the call chain through the FFA_INTERRUPT
 645 * ABI. The api_preempt() call is equivalent to calling
 646 * api_switch_to_other_world for the current vCPU, passing FFA_INTERRUPT.
 647 * The SP can be resumed later by FFA_RUN.
648 */
649 CHECK(current_vcpu->scheduling_mode == NWD_MODE);
650 assert(current_vcpu->call_chain.next_node == NULL);
651
652 if (current_vcpu->call_chain.prev_node == NULL) {
653 /* End of NWd scheduled call chain */
654 return api_preempt(current_vcpu);
655 }
656
657 next = current_vcpu->call_chain.prev_node;
658 CHECK(next != NULL);
659
660 /*
661 * Lock both vCPUs. Strictly speaking, it may not be necessary since
 662 * next is guaranteed to be in the BLOCKED state, as it is the predecessor of
663 * the current vCPU in the present call chain.
664 */
665 both_vcpu_locked = vcpu_lock_both(current_vcpu, next);
666
667 /* Removing a node from an existing call chain. */
668 current_vcpu->call_chain.prev_node = NULL;
669 current_vcpu->state = VCPU_STATE_PREEMPTED;
670
671 /*
 672 * The SPMC applies the runtime model until the vCPU transitions from
 673 * the running to the waiting state. Moreover, the SP remains in its
 674 * CPU cycle allocation mode. Hence, rt_model and scheduling_mode
675 * are not changed here.
676 */
677 assert(next->state == VCPU_STATE_BLOCKED);
678 assert(next->call_chain.next_node == current_vcpu);
679
680 next->call_chain.next_node = NULL;
681
682 vcpu_set_running(both_vcpu_locked.vcpu2,
683 &(struct ffa_value){
684 .func = FFA_INTERRUPT_32,
685 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
686 vcpu_index(current_vcpu)),
687 });
688
689 sl_unlock(&next->lock);
690 sl_unlock(&current_vcpu->lock);
691
692 return next;
693}
694
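/**
 * Enable the virtual maintenance interrupts applicable to the current
 * partition: the managed exit interrupt (as a vIRQ or vFIQ, per the partition
 * manifest) if managed exit is supported, and the notification pending
 * interrupt if notifications are enabled.
 */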
695static void plat_ffa_enable_virtual_maintenance_interrupts(
696 struct vcpu_locked current_locked)
697{
698 struct vcpu *current;
699 struct interrupts *interrupts;
700 struct vm *vm;
701
702 current = current_locked.vcpu;
703 interrupts = &current->interrupts;
704 vm = current->vm;
705
 706	if (ffa_vm_managed_exit_supported(vm)) {
 707		vcpu_virt_interrupt_set_enabled(interrupts,
708 HF_MANAGED_EXIT_INTID);
709 /*
 710 * The SPMC decides the interrupt type for the Managed exit signal based
711 * on the partition manifest.
712 */
713 if (vm->me_signal_virq) {
714 vcpu_virt_interrupt_set_type(interrupts,
715 HF_MANAGED_EXIT_INTID,
716 INTERRUPT_TYPE_IRQ);
717 } else {
718 vcpu_virt_interrupt_set_type(interrupts,
719 HF_MANAGED_EXIT_INTID,
720 INTERRUPT_TYPE_FIQ);
721 }
722 }
723
724 if (vm->notifications.enabled) {
725 vcpu_virt_interrupt_set_enabled(interrupts,
726 HF_NOTIFICATION_PENDING_INTID);
727 }
728}
729
730/**
731 * Enable relevant virtual interrupts for Secure Partitions.
732 * For all SPs, any applicable virtual maintenance interrupts are enabled.
733 * Additionally, for S-EL0 partitions, all the interrupts declared in the
734 * partition manifest are enabled at the virtual interrupt controller
735 * interface early during the boot stage as an S-EL0 SP need not call
736 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
737 */
 738void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
739 struct vm_locked vm_locked)
 740{
741 struct vcpu *current;
742 struct interrupts *interrupts;
743 struct vm *vm;
744
745 current = current_locked.vcpu;
746 interrupts = &current->interrupts;
747 vm = current->vm;
748 assert(vm == vm_locked.vm);
749
750 if (vm->el0_partition) {
751 for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
752 struct interrupt_descriptor int_desc;
753
754 int_desc = vm_locked.vm->interrupt_desc[k];
755
756 /* Interrupt descriptors are populated contiguously. */
757 if (!int_desc.valid) {
758 break;
759 }
760 vcpu_virt_interrupt_set_enabled(interrupts,
761 int_desc.interrupt_id);
762 }
763 }
764
765 plat_ffa_enable_virtual_maintenance_interrupts(current_locked);
766}
767
768/**
 769 * Reconfigure an interrupt belonging to the current partition at runtime.
 770 * At present, this paravirtualized interface only allows the following
 771 * commands, which signify what change is being requested by the current
772 * partition:
773 * - Change the target CPU of the interrupt.
774 * - Change the security state of the interrupt.
775 * - Enable or disable the physical interrupt.
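 *
 * For example (illustrative only, the interrupt ID is hypothetical): an SP
 * owning physical interrupt 0x20 could request that it be retargeted to the
 * CPU with index 1 by passing command INT_RECONFIGURE_TARGET_PE and value 1,
 * or disable it by passing command INT_RECONFIGURE_ENABLE and value
 * INT_DISABLE.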
776 */
 777int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
778 uint32_t value, struct vcpu *current)
 779{
780 struct vm *vm = current->vm;
781 struct vm_locked vm_locked;
782 int64_t ret = -1;
783 struct interrupt_descriptor *int_desc = NULL;
784
785 /*
786 * Lock VM to protect interrupt descriptor from being modified
787 * concurrently.
788 */
789 vm_locked = vm_lock(vm);
790
791 switch (command) {
792 case INT_RECONFIGURE_TARGET_PE:
793 /* Here, value represents the target PE index. */
794 if (value >= MAX_CPUS) {
795 dlog_verbose(
796 "Illegal target PE index specified while "
797 "reconfiguring interrupt %x\n",
798 int_id);
799 goto out_unlock;
800 }
801
802 /*
 803 * A UP SP cannot reconfigure an interrupt to be targeted at
 804 * any physical CPU other than the one it is currently
 805 * running on.
806 */
807 if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
808 dlog_verbose(
809 "Illegal target PE index specified by current "
810 "UP SP\n");
811 goto out_unlock;
812 }
813
814 /* Configure the interrupt to be routed to a specific CPU. */
815 int_desc = vm_interrupt_set_target_mpidr(
816 vm_locked, int_id, cpu_find_index(value)->id);
817 break;
818 case INT_RECONFIGURE_SEC_STATE:
819 /* Specify the new security state of the interrupt. */
820 if (value != INT_DESC_SEC_STATE_NS &&
821 value != INT_DESC_SEC_STATE_S) {
822 dlog_verbose(
823 "Illegal value %x specified while "
824 "reconfiguring interrupt %x\n",
825 value, int_id);
826 goto out_unlock;
827 }
828 int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
829 break;
830 case INT_RECONFIGURE_ENABLE:
831 /* Enable or disable the interrupt. */
832 if (value != INT_DISABLE && value != INT_ENABLE) {
833 dlog_verbose(
834 "Illegal value %x specified while "
835 "reconfiguring interrupt %x\n",
836 value, int_id);
837 goto out_unlock;
838 } else {
839 int_desc = vm_interrupt_set_enable(vm_locked, int_id,
840 value == INT_ENABLE);
841 }
842 break;
843 default:
844 dlog_verbose("Interrupt reconfigure: Unsupported command %x\n",
845 command);
846 goto out_unlock;
847 }
848
849 /* Check if the interrupt belongs to the current SP. */
850 if (int_desc == NULL) {
851 dlog_verbose("Interrupt %x does not belong to current SP\n",
852 int_id);
853 goto out_unlock;
854 }
855
856 ret = 0;
857 plat_interrupts_reconfigure_interrupt(*int_desc);
858
859out_unlock:
860 vm_unlock(&vm_locked);
861
862 return ret;
863}
864
 865/* Returns the virtual interrupt ID to be handled by the SP. */
 866uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
 867{
868 uint32_t int_id;
869
870 /*
871 * If there are any virtual interrupts in the queue, return the first
 872 * entry. Otherwise, return the pending interrupt from the bitmap.
873 */
874 if (vcpu_interrupt_queue_peek(current_locked, &int_id)) {
875 struct interrupts *interrupts;
876
877 /*
878 * Mark the virtual interrupt as no longer pending and decrement
879 * the count.
880 */
881 interrupts = &current_locked.vcpu->interrupts;
882 vcpu_virt_interrupt_clear_pending(interrupts, int_id);
883 vcpu_interrupt_count_decrement(current_locked, interrupts,
884 int_id);
885
886 return int_id;
887 }
888
889 return api_interrupt_get(current_locked);
890}