fix(ff-a): notification pending interrupt
Currently, for S-EL1 partitions, the NPI is injected at two different
points depending on the type of notification:
- For per-vCPU notifications, the NPI is injected at the moment the
notifications are set.
- For global notifications, the NPI is injected at the context switch
from the NWd to the receiver SP.
This patch amends the behavior described above. The NPI is now injected
while handling FFA_RUN and FFA_MSG_SEND_DIRECT_REQ, when the vCPU is
resumed to the running state.
Also, the NPI is now injected through the same function regardless of
the notification type, which makes the implementation easier to read
and debug (if necessary).
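The unified path is plat_ffa_inject_notification_pending_interrupt(),
used throughout the hunks below. A minimal sketch of its expected
shape, inferred from the call sites in this patch; the pending-state
check 'npi_is_due' is a placeholder, not the real helper, while
internal_interrupt_inject() and vm_notifications_set_npi_injected()
appear elsewhere in this patch:

    bool plat_ffa_inject_notification_pending_interrupt(
        struct vcpu_locked target_locked, struct vcpu *current,
        struct vm_locked receiver_locked)
    {
        struct vcpu *target_vcpu = target_locked.vcpu;

        /* Placeholder for the actual pending-notifications check. */
        if (!npi_is_due(receiver_locked, target_vcpu)) {
            return false;
        }

        /* Make the NPI pending on the target vCPU. */
        internal_interrupt_inject(target_vcpu,
                                  HF_NOTIFICATION_PENDING_INTID,
                                  current, NULL);

        /* Record the injection so it is not repeated needlessly. */
        vm_notifications_set_npi_injected(receiver_locked, true);

        return true;
    }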
For S-EL0 partitions, the NPI is not used. The S-EL0 partition must be
notified of pending notifications through an IMPDEF direct message
request.
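A hedged sketch of how such a request might look from the sender's
side; HF_NOTIFICATION_PENDING_MSG, sender_id and receiver_id are
hypothetical names, and the actual IMPDEF encoding is outside the
scope of this patch:

    struct ffa_value req = (struct ffa_value){
        .func = FFA_MSG_SEND_DIRECT_REQ_32,
        /* Sender ID in bits [31:16], receiver ID in bits [15:0]. */
        .arg1 = ((uint32_t)sender_id << 16) | receiver_id,
        /* IMPDEF payload signalling pending notifications. */
        .arg3 = HF_NOTIFICATION_PENDING_MSG,
    };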
Change-Id: Ib1906c8e5de4be63ca7a7a1cafd25a4675b9b940
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index ac3a741..67900b4 100644
--- a/src/api.c
+++ b/src/api.c
@@ -687,14 +687,14 @@
* Prepares the vCPU to run by updating its state and fetching whether a return
* value needs to be forced onto the vCPU.
*/
-static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
+static bool api_vcpu_prepare_run(struct vcpu *current, struct vcpu *vcpu,
struct ffa_value *run_ret)
{
struct vcpu_locked vcpu_locked;
struct vm_locked vm_locked;
- bool need_vm_lock;
bool ret;
uint64_t timer_remaining_ns = FFA_SLEEP_INDEFINITE;
+ bool need_vm_lock;
/*
* Check that the registers are available so that the vCPU can be run.
@@ -715,9 +715,13 @@
}
#endif
-
/* The VM needs to be locked to deliver mailbox messages. */
- need_vm_lock = vcpu->state == VCPU_STATE_WAITING;
+ need_vm_lock = vcpu->state == VCPU_STATE_WAITING ||
+ (!vcpu->vm->el0_partition &&
+ (vcpu->state == VCPU_STATE_BLOCKED_INTERRUPT ||
+ vcpu->state == VCPU_STATE_BLOCKED ||
+ vcpu->state == VCPU_STATE_PREEMPTED));
+
if (need_vm_lock) {
vcpu_unlock(&vcpu_locked);
vm_locked = vm_lock(vcpu->vm);
@@ -775,6 +779,13 @@
break;
}
+ assert(need_vm_lock);
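+ /*
+  * If the receiver SP has pending notifications, make the NPI
+  * pending: that alone is a reason for the vCPU to run.
+  */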
+ if (!vm_locked.vm->el0_partition &&
+ plat_ffa_inject_notification_pending_interrupt(
+ vcpu_locked, current, vm_locked)) {
+ break;
+ }
+
/*
* A pending message allows the vCPU to run so the message can
* be delivered directly.
@@ -805,6 +816,13 @@
ret = false;
goto out;
case VCPU_STATE_BLOCKED_INTERRUPT:
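+ /*
+  * An injected NPI becomes the pending virtual interrupt that
+  * allows the vCPU to leave this state.
+  */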
+ if (need_vm_lock &&
+ plat_ffa_inject_notification_pending_interrupt(
+ vcpu_locked, current, vm_locked)) {
+ assert(vcpu_interrupt_count_get(vcpu_locked) > 0);
+ break;
+ }
+
/* Allow virtual interrupts to be delivered. */
if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
break;
@@ -837,6 +855,11 @@
case VCPU_STATE_BLOCKED:
/* A blocked vCPU is run unconditionally. Fall through. */
case VCPU_STATE_PREEMPTED:
+ /* Check whether the NPI should be injected here. */
+ if (need_vm_lock) {
+ plat_ffa_inject_notification_pending_interrupt(
+ vcpu_locked, current, vm_locked);
+ }
break;
default:
/*
@@ -866,7 +889,6 @@
if (need_vm_lock) {
vm_unlock(&vm_locked);
}
-
return ret;
}
@@ -2049,6 +2071,7 @@
{
struct ffa_value ret;
struct vm *receiver_vm;
+ struct vm_locked receiver_locked;
struct vcpu *receiver_vcpu;
struct two_vcpu_locked vcpus_locked;
@@ -2093,6 +2116,7 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
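+ /*
+  * Lock the receiver VM: its notification state is consulted
+  * before injecting the NPI below.
+  */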
+ receiver_locked = vm_lock(receiver_vm);
vcpus_locked = vcpu_lock_both(receiver_vcpu, current);
/*
@@ -2164,6 +2188,20 @@
/* Switch to receiver vCPU targeted to by direct msg request */
*next = receiver_vcpu;
+ if (!receiver_locked.vm->el0_partition) {
+ /*
+ * If the scheduler in the system is giving CPU cycles to the
+ * receiver due to pending notifications, inject the NPI. The
+ * following call assumes that '*next' has been set to
+ * receiver_vcpu.
+ */
+ plat_ffa_inject_notification_pending_interrupt(
+ vcpus_locked.vcpu1.vcpu == receiver_vcpu
+ ? vcpus_locked.vcpu1
+ : vcpus_locked.vcpu2,
+ current, receiver_locked);
+ }
+
/*
* Since this flow will lead to a VM switch, the return value will not
* be applied to current vCPU.
@@ -2172,6 +2210,7 @@
out:
sl_unlock(&receiver_vcpu->lock);
sl_unlock(¤t->lock);
+ vm_unlock(&receiver_locked);
return ret;
}
@@ -2978,22 +3017,6 @@
plat_ffa_sri_state_set(DELAYED);
}
- /*
- * If notifications set are per-vCPU and the receiver is SP, the
- * Notifications Pending Interrupt can be injected now.
- * If not, it should be injected when the scheduler gives it CPU cycles
- * in a specific vCPU.
- */
- if (is_per_vcpu && vm_id_is_current_world(receiver_vm_id)) {
- struct vcpu *target_vcpu =
- vm_get_vcpu(receiver_locked.vm, vcpu_id);
-
- dlog_verbose("Per-vCPU notification, pending NPI.\n");
- internal_interrupt_inject(target_vcpu,
- HF_NOTIFICATION_PENDING_INTID,
- current, NULL);
- }
-
ret = (struct ffa_value){.func = FFA_SUCCESS_32};
out:
vm_unlock(&receiver_locked);
@@ -3085,6 +3108,11 @@
plat_ffa_sri_state_set(HANDLED);
}
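+ /*
+  * With no global notifications left pending, clear the flag so
+  * the NPI can be injected again for future notifications.
+  */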
+ if (!receiver_locked.vm->el0_partition &&
+ !vm_are_global_notifications_pending(receiver_locked)) {
+ vm_notifications_set_npi_injected(receiver_locked, false);
+ }
+
out:
vm_unlock(&receiver_locked);