feat: queue interrupts upon CPU cycle allocation through FFA_RUN
Before an SP is allocated CPU cycles to start or extend a call chain,
SPMC must queue interrupts based on the action specified by the SP in
response to non-secure and other secure interrupts.
Previously, interrupts were queued only when CPU cycles were allocated
through FFA_MSG_SEND_DIRECT_REQ. This patch extends the behavior to the
scenario where CPU cycles are allocated by the NWd through the FFA_RUN ABI.
Consequently, the interrupts are allowed to trigger when the SP
relinquishes CPU cycles back through the FFA_YIELD and FFA_MSG_WAIT
interfaces.
Change-Id: I517c1a3d13870487987379cad2a1e0bd5a816334
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
diff --git a/src/api.c b/src/api.c
index 623111f..baa2d0e 100644
--- a/src/api.c
+++ b/src/api.c
@@ -285,6 +285,7 @@
assert(next_state == VCPU_STATE_BLOCKED);
+ plat_ffa_yield_prepare(current);
*next = api_switch_to_primary(
current,
(struct ffa_value){.func = FFA_YIELD_32,
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 5699c22..954adc8 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -602,3 +602,8 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
+
+void plat_ffa_yield_prepare(struct vcpu *current)
+{
+ (void)current;
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 7a5924d..063ab3c 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -2259,3 +2259,8 @@
return ret;
}
+
+void plat_ffa_yield_prepare(struct vcpu *current)
+{
+ (void)current;
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 2ac2753..d027dfe 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -2233,6 +2233,15 @@
}
/*
 * If CPU cycles were allocated through the FFA_RUN interface, allow the
 * interrupts (if they were masked earlier) before returning control
 * to the NWd.
+ */
+ if (current->rt_model == RTM_FFA_RUN) {
+ plat_ffa_vcpu_allow_interrupts(current);
+ }
+
+ /*
* The vCPU of an SP on secondary CPUs will invoke FFA_MSG_WAIT
* to indicate successful initialization to SPMC.
*/
@@ -2378,6 +2387,8 @@
vcpu->state == VCPU_STATE_BLOCKED);
}
+ plat_ffa_vcpu_queue_interrupts(target_locked);
+
	vcpu_unlock(&current_vcpu_locked);
}
@@ -2649,3 +2660,12 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
+
+void plat_ffa_yield_prepare(struct vcpu *current)
+{
+ /*
+	 * Before returning control to the NWd, allow the interrupts (if they
+	 * were masked earlier).
+ */
+ plat_ffa_vcpu_allow_interrupts(current);
+}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index b6a6a1a..4ca4e31 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -571,3 +571,8 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
+
+void plat_ffa_yield_prepare(struct vcpu *current)
+{
+ (void)current;
+}