feat(notifications): inject notification pending interrupt
This patch injects a notification pending interrupt (NPI) into the
vCPU of a receiver with pending notifications:
- If the notification is global, the NPI is injected at the context
switch from the NWd to the receiver SP, into the vCPU through which
the scheduler gave CPU cycles to the receiver.
- If the notification is per-vCPU, the NPI is injected while handling
FFA_NOTIFICATION_SET, into the vCPU specified in the arguments of the
call. It is handled when the scheduler next runs the SP on the
respective vCPU.
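
Condensed into a standalone model, the decision made on the
context-switch path is roughly the following. This is an illustrative
sketch, not code from this patch: should_inject_npi() is a
hypothetical helper, and the real logic lives in the spmc.c hunk
below.

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Per-vCPU notifications already received an NPI when handling
     * FFA_NOTIFICATION_SET, so at context switch the NPI is injected
     * only for pending global notifications, and only when no per-vCPU
     * notifications are pending, to avoid a spurious second interrupt.
     */
    static bool should_inject_npi(bool current_is_nwd, bool next_is_sp,
                                  bool per_vcpu_pending,
                                  bool global_pending)
    {
        return current_is_nwd && next_is_sp && !per_vcpu_pending &&
               global_pending;
    }

    int main(void)
    {
        /* Global pending, no per-vCPU pending: inject (prints 1). */
        printf("%d\n", should_inject_npi(true, true, false, true));
        /* Per-vCPU also pending: NPI already injected at SET (prints 0). */
        printf("%d\n", should_inject_npi(true, true, true, true));
        return 0;
    }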
Change-Id: Ic1232581b3861313e11c488be5091bf803a70f6a
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index 330dc72..e353bd9 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2809,7 +2809,7 @@
goto out;
}
- /* Set notifications pending */
+ /* Set notifications pending. */
vm_notifications_set(receiver_locked, plat_ffa_is_vm_id(sender_vm_id),
notifications, vcpu_id, is_per_vcpu);
dlog_verbose("Set the notifications: %x.\n", notifications);
@@ -2822,6 +2822,22 @@
plat_ffa_sri_state_set(DELAYED);
}
+ /*
+ * If the notifications set are per-vCPU and the receiver is an SP,
+ * the Notifications Pending Interrupt can be injected now.
+ * Otherwise, it is injected when the scheduler gives the receiver
+ * CPU cycles on a specific vCPU.
+ */
+ if (is_per_vcpu && vm_id_is_current_world(receiver_vm_id)) {
+ struct vcpu *target_vcpu =
+ vm_get_vcpu(receiver_locked.vm, vcpu_id);
+
+ dlog_verbose("Per-vCPU notification, pending NPI.\n");
+ internal_interrupt_inject(
+ target_vcpu, HF_NOTIFICATION_PENDING_INTERRUPT_INTID,
+ current, NULL);
+ }
+
ret = (struct ffa_value){.func = FFA_SUCCESS_32};
out:
vm_unlock(&receiver_locked);
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index a5f32cf..2907d50 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -736,6 +736,8 @@
plat_ffa_sri_trigger_if_delayed(vcpu->cpu);
}
#endif
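+ /* Inject the NPI into 'next' if it has pending global notifications. */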
+ plat_ffa_inject_notification_pending_interrupt_context_switch(
+ *next, vcpu);
arch_regs_set_retval(&vcpu->regs, args);
vcpu_update_virtual_interrupts(*next);
return true;
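
The call above assumes the following shared declaration is visible to
handler.c; the header that carries it is not shown in this patch, so
its location is assumed:

    void plat_ffa_inject_notification_pending_interrupt_context_switch(
        struct vcpu *next, struct vcpu *current);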
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 5fdc54f..1aafad2 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -289,3 +289,10 @@
void plat_ffa_sri_init(void)
{
}
+
+void plat_ffa_inject_notification_pending_interrupt_context_switch(
+ struct vcpu *next, struct vcpu *current)
+{
+ (void)next;
+ (void)current;
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 6a2cb4f..b4f6df9 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -541,3 +541,10 @@
{
(void)cpu;
}
+
+void plat_ffa_inject_notification_pending_interrupt_context_switch(
+ struct vcpu *next, struct vcpu *current)
+{
+ (void)next;
+ (void)current;
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 0444b30..8b272b9 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -1171,3 +1171,39 @@
plat_interrupts_configure_interrupt(sri_desc);
}
+
+void plat_ffa_inject_notification_pending_interrupt_context_switch(
+ struct vcpu *next, struct vcpu *current)
+{
+ CHECK(current != NULL);
+ /*
+ * If the NWd is giving CPU cycles to an SP, check whether the
+ * virtual Notifications Pending Interrupt must be injected.
+ */
+ if (current->vm->id == HF_OTHER_WORLD_ID && next != NULL &&
+ vm_id_is_current_world(next->vm->id)) {
+ struct vm_locked target_vm_locked =
+ vm_find_locked(next->vm->id);
+ /*
+ * If per-vCPU notifications are pending, the NPI was
+ * already injected into the targeted vCPU when handling
+ * FFA_NOTIFICATION_SET. If the next SP has pending global
+ * notifications, inject only when there are no pending
+ * per-vCPU notifications, to avoid injecting a spurious
+ * interrupt.
+ */
+ if (!vm_are_per_vcpu_notifications_pending(target_vm_locked,
+ vcpu_index(next)) &&
+ vm_are_global_notifications_pending(target_vm_locked)) {
+ struct vcpu_locked next_locked = vcpu_lock(next);
+
+ api_interrupt_inject_locked(
+ next_locked,
+ HF_NOTIFICATION_PENDING_INTERRUPT_INTID,
+ current, NULL);
+
+ vcpu_unlock(&next_locked);
+ }
+ vm_unlock(&target_vm_locked);
+ }
+}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 04262bb..d9bfc8b 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -276,3 +276,10 @@
{
(void)cpu;
}
+
+void plat_ffa_inject_notification_pending_interrupt_context_switch(
+ struct vcpu *next, struct vcpu *current)
+{
+ (void)next;
+ (void)current;
+}
diff --git a/src/vm.c b/src/vm.c
index e4bb2e1..47d4650 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -427,6 +427,13 @@
return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
}
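+/**
+ * Returns a pointer to the receiver's notifications structure for the
+ * given sender world: notifications from VMs or from SPs.
+ */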
+static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
+ bool is_from_vm)
+{
+ return is_from_vm ? &vm_locked.vm->notifications.from_vm
+ : &vm_locked.vm->notifications.from_sp;
+}
+
/*
* Initializes the notifications structure.
*/
@@ -447,8 +454,7 @@
CHECK(vm_locked.vm != NULL);
- to_check = from_vm ? &vm_locked.vm->notifications.from_vm
- : &vm_locked.vm->notifications.from_sp;
+ to_check = vm_get_notifications(vm_locked, from_vm);
/* Check if there are pending per vcpu notifications */
for (uint32_t i = 0U; i < MAX_CPUS; i++) {
@@ -461,6 +467,33 @@
return (to_check->global.pending & notifications) != 0U;
}
+/**
+ * Checks if there are pending global notifications, either from SPs or from
+ * VMs.
+ */
+bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
+{
+ return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
+ vm_get_notifications(vm_locked, false)->global.pending != 0ULL;
+}
+
+/**
+ * Checks if a specific vCPU has pending per-vCPU notifications, either
+ * from SPs or from VMs.
+ */
+bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
+ ffa_vcpu_index_t vcpu_id)
+{
+ CHECK(vcpu_id < MAX_CPUS);
+
+ return vm_get_notifications(vm_locked, true)
+ ->per_vcpu[vcpu_id]
+ .pending != 0ULL ||
+ vm_get_notifications(vm_locked, false)
+ ->per_vcpu[vcpu_id]
+ .pending != 0ULL;
+}
+
bool vm_are_notifications_enabled(struct vm_locked vm_locked)
{
return vm_locked.vm->notifications.enabled == true;
@@ -472,13 +505,6 @@
return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}
-static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
- bool is_from_vm)
-{
- return is_from_vm ? &vm_locked.vm->notifications.from_vm
- : &vm_locked.vm->notifications.from_sp;
-}
-
static void vm_notifications_global_state_count_update(
ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{