feat(notifications): schedule receiver interrupt
Configure the Schedule Receiver Interrupt (SRI) for each CPU, and send
the respective SGI to the NWd. If the flag FFA_NOTIFICATIONS_FLAG_DELAY_SRI
is set, the SGI is sent upon the context switch from the SWd to the NWd;
otherwise it is sent immediately, preempting the sender SP's execution.
A state machine coordinates the handling and sending of the SRI.
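A minimal sketch of the two paths from the sender's perspective (the
ffa_notification_set helper name is illustrative, not part of this
change):

    /* Delayed: SRI sent on the next SWd -> NWd context switch. */
    ffa_notification_set(sender, receiver,
                         FFA_NOTIFICATIONS_FLAG_DELAY_SRI, bitmap);

    /* Immediate: SRI sent now; the sender SP is preempted. */
    ffa_notification_set(sender, receiver, 0, bitmap);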
Change-Id: If05a6535094f5da7189d8dbb55b04e7c1a1f80d7
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/inc/hf/api.h b/inc/hf/api.h
index bfbc646..b31d7b5 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -37,6 +37,7 @@
int64_t api_interrupt_inject_locked(struct vcpu_locked target_locked,
uint32_t intid, struct vcpu *current,
struct vcpu **next);
+void api_sri_send_if_delayed(struct vcpu *current);
struct ffa_value api_ffa_msg_send(ffa_vm_id_t sender_vm_id,
ffa_vm_id_t receiver_vm_id, uint32_t size,
diff --git a/inc/hf/arch/plat/ffa.h b/inc/hf/arch/plat/ffa.h
index aa7c26e..8ee0602 100644
--- a/inc/hf/arch/plat/ffa.h
+++ b/inc/hf/arch/plat/ffa.h
@@ -12,6 +12,44 @@
#include "hf/vcpu.h"
#include "hf/vm.h"
+/**
+ * The following enum relates to a state machine to guide the handling of the
+ * Schedule Receiver Interrupt.
+ * The SRI is used to signal the receiver scheduler that there are pending
+ * notifications for the receiver, and it is sent when there is a valid call
+ * to FFA_NOTIFICATION_SET.
+ * The FFA_NOTIFICATION_INFO_GET interface must be called in the SRI handler,
+ * after which the FF-A driver should process the returned list, and request
+ * the receiver scheduler to give the receiver CPU cycles to process the
+ * notification.
+ * The use of the following state machine allows for synchronized sending
+ * and handling of the SRI, as well as avoiding the occurrence of spurious
+ * SRIs. A spurious SRI is one for which, upon handling, the call to
+ * FFA_NOTIFICATION_INFO_GET returns error FFA_NO_DATA, which is plausible
+ * in an MP system.
+ * The state machine also covers the delaying of the SRI, requested by
+ * setting the flag FFA_NOTIFICATIONS_FLAG_DELAY_SRI in the arguments of the
+ * set call. When delayed, the SRI is sent upon the context switch to the
+ * receiver scheduler.
+ * The SPMC is implemented under the assumption that the receiver scheduler
+ * is a NWd endpoint, hence the SRI is triggered at the world switch.
+ * If, concurrently, another notification is set that requires immediate
+ * action, the SRI is triggered immediately within that same execution
+ * context.
+ *
+ * HANDLED is the initial state, and means a new SRI can be sent. The
+ * following state transitions are possible:
+ * * HANDLED => DELAYED: Setting notification, and requesting SRI delay.
+ * * HANDLED => TRIGGERED: Setting notification, and not requesting SRI delay.
+ * * DELAYED => TRIGGERED: SRI was delayed, and the context switch to the
+ * receiver scheduler is being done.
+ * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
+ * * TRIGGERED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
+ */
+enum plat_ffa_sri_state {
+ HANDLED = 0,
+ DELAYED,
+ TRIGGERED,
+};
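+
+/*
+ * Compact view of the transitions above (illustrative summary):
+ *
+ *   HANDLED   -- set + delay flag ---------------> DELAYED
+ *   HANDLED   -- set, no delay flag -------------> TRIGGERED
+ *   DELAYED   -- world switch to NWd ------------> TRIGGERED
+ *   DELAYED   -- FFA_NOTIFICATION_INFO_GET ------> HANDLED
+ *   TRIGGERED -- FFA_NOTIFICATION_INFO_GET ------> HANDLED
+ */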
+
/** Returns information on features that are specific to the platform. */
struct ffa_value plat_ffa_features(uint32_t function_id);
/** Returns the SPMC ID. */
@@ -129,6 +167,28 @@
uint32_t *lists_count,
const uint32_t ids_count_max);
+/** Helper to set the current SRI state. */
+void plat_ffa_sri_state_set(enum plat_ffa_sri_state state);
+
+/**
+ * Helper to send the SRI and safely update `sri_state`, if there has been
+ * a call to FFA_NOTIFICATION_SET, and the SRI has been delayed.
+ * To be called at a context switch to the NWd.
+ */
+void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu);
+
+/**
+ * Helper to send the SRI and safely update `sri_state`, if the delay wasn't
+ * requested in the call to FFA_NOTIFICATION_SET.
+ */
+void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);
+
+/**
+ * Initialize the Schedule Receiver Interrupt, needed in the context of
+ * notifications support.
+ */
+void plat_ffa_sri_init(struct cpu *cpu);
+
void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
uint32_t *lists_sizes,
uint32_t *lists_count,
diff --git a/inc/vmapi/hf/ffa.h b/inc/vmapi/hf/ffa.h
index cc87c42..f3d9d15 100644
--- a/inc/vmapi/hf/ffa.h
+++ b/inc/vmapi/hf/ffa.h
@@ -405,6 +405,8 @@
#define MAX_FFA_NOTIFICATIONS 64U
+#define FFA_SCHEDULE_RECEIVER_INTERRUPT_ID 8
+
/**
* Flag for notification bind and set, to specify call is about per-vCPU
* notifications.
@@ -443,6 +445,9 @@
#define FFA_NOTIFICATION_FLAG_BITMAP_SPM UINT32_C(0x1 << 2)
#define FFA_NOTIFICATION_FLAG_BITMAP_HYP UINT32_C(0x1 << 3)
+/** Flag for FFA_NOTIFICATION_SET to delay the Schedule Receiver Interrupt. */
+#define FFA_NOTIFICATIONS_FLAG_DELAY_SRI UINT32_C(0x1 << 1)
+
static inline ffa_vm_id_t ffa_notifications_get_receiver(struct ffa_value args)
{
return (args.arg1 >> 16) & 0xffffU;
diff --git a/src/api.c b/src/api.c
index cb27a58..54bde1d 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2758,11 +2758,6 @@
bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
ffa_vcpu_index_t vcpu_id = (uint16_t)(flags >> 16);
- /*
- * TODO: cater for the delay_schedule_receiver flag when dealing with
- * schedule receiver interrupt.
- */
-
if (!plat_ffa_is_notification_set_valid(current, sender_vm_id,
receiver_vm_id)) {
dlog_verbose("Invalid use of notifications set interface.\n");
@@ -2819,8 +2814,15 @@
notifications, vcpu_id, is_per_vcpu);
dlog_verbose("Set the notifications: %x.\n", notifications);
- ret = (struct ffa_value){.func = FFA_SUCCESS_32};
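+ /*
+ * Depending on FFA_NOTIFICATIONS_FLAG_DELAY_SRI, either trigger the
+ * SRI now, or record that it must be sent on the next world switch
+ * to the NWd.
+ */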
+ if ((FFA_NOTIFICATIONS_FLAG_DELAY_SRI & flags) == 0) {
+ dlog_verbose("SRI was NOT delayed. vcpu: %u!\n",
+ vcpu_index(current));
+ plat_ffa_sri_trigger_not_delayed(current->cpu);
+ } else {
+ plat_ffa_sri_state_set(DELAYED);
+ }
+ ret = (struct ffa_value){.func = FFA_SUCCESS_32};
out:
vm_unlock(&receiver_locked);
@@ -2957,6 +2959,7 @@
uint32_t lists_count = 0;
uint32_t ids_count = 0;
bool list_is_full = false;
+ struct ffa_value result;
/*
* This interface can only be called at NS virtual/physical FF-A
@@ -3004,11 +3007,14 @@
if (ids_count == 0) {
dlog_verbose(
"Notification info get has no data to retrieve.\n");
- return ffa_error(FFA_NO_DATA);
+ result = ffa_error(FFA_NO_DATA);
+ } else {
+ result = api_ffa_notification_info_get_success_return(
+ ids, ids_count, lists_sizes, lists_count, list_is_full);
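+ /*
+ * The list of pending notification IDs is being returned to
+ * the receiver scheduler, so the SRI is now considered
+ * handled and a new one may be sent.
+ */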
+ plat_ffa_sri_state_set(HANDLED);
}
- return api_ffa_notification_info_get_success_return(
- ids, ids_count, lists_sizes, lists_count, list_is_full);
+ return result;
}
struct ffa_value api_ffa_mem_perm_get(vaddr_t base_addr, struct vcpu *current)
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index d038ef3..a5f32cf 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -726,6 +726,16 @@
#endif
if (ffa_handler(&args, vcpu, next)) {
+#if SECURE_WORLD == 1
+ /*
+ * If giving back execution to the NWd, check if the Schedule
+ * Receiver Interrupt has been delayed, and trigger it if so.
+ */
+ if ((*next != NULL && (*next)->vm->id == HF_OTHER_WORLD_ID) ||
+ (*next == NULL && vcpu->vm->id == HF_OTHER_WORLD_ID)) {
+ plat_ffa_sri_trigger_if_delayed(vcpu->cpu);
+ }
+#endif
arch_regs_set_retval(&vcpu->regs, args);
vcpu_update_virtual_interrupts(*next);
return true;
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index baeee11..5fdc54f 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -285,3 +285,7 @@
(void)lists_count;
(void)ids_count_max;
}
+
+void plat_ffa_sri_init(struct cpu *cpu)
+{
+ (void)cpu;
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index dbc061b..6a2cb4f 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -6,6 +6,7 @@
* https://opensource.org/licenses/BSD-3-Clause.
*/
+#include "hf/arch/barriers.h"
#include "hf/arch/ffa.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
@@ -17,6 +18,7 @@
#include "hf/vcpu.h"
#include "hf/vm.h"
+#include "msr.h"
#include "smc.h"
#include "sysregs.h"
@@ -513,3 +515,29 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
+
+void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
+{
+ (void)state;
+}
+
+/**
+ * A Hypervisor should send the SRI to the Primary Endpoint. This is not
+ * implemented, as the Hypervisor is only of interest for the sake of having
+ * a test infrastructure that encompasses the NWd, and we are not interested
+ * in testing the flow of notifications between VMs only.
+ */
+void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+{
+ (void)cpu;
+}
+
+void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+{
+ (void)cpu;
+}
+
+void plat_ffa_sri_init(struct cpu *cpu)
+{
+ (void)cpu;
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 23cc039..0444b30 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -7,6 +7,8 @@
*/
#include "hf/arch/ffa.h"
+#include "hf/arch/mmu.h"
+#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/sve.h"
@@ -14,15 +16,48 @@
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
+#include "hf/interrupt_desc.h"
#include "hf/plat/interrupts.h"
#include "hf/std.h"
+#include "hf/vcpu.h"
#include "hf/vm.h"
#include "vmapi/hf/ffa.h"
+#include "msr.h"
#include "smc.h"
#include "sysregs.h"
+/** Interrupt priority for the Schedule Receiver Interrupt. */
+#define SRI_PRIORITY 0x10U
+
+/** Encapsulates `sri_state` while `sri_state_lock_instance` is held. */
+struct sri_state_locked {
+ enum plat_ffa_sri_state *sri_state;
+};
+
+/** To globally keep track of the SRI handling. */
+static enum plat_ffa_sri_state sri_state = HANDLED;
+
+/** Lock to guard access to `sri_state`. */
+static struct spinlock sri_state_lock_instance = SPINLOCK_INIT;
+
+/** Acquires the lock guarding `sri_state`. */
+static struct sri_state_locked sri_state_lock(void)
+{
+ sl_lock(&sri_state_lock_instance);
+
+ return (struct sri_state_locked){.sri_state = &sri_state};
+}
+
+/** Releases the lock guarding `sri_state`. */
+static void sri_state_unlock(struct sri_state_locked sri_state_locked)
+{
+ CHECK(sri_state_locked.sri_state == &sri_state);
+ sri_state_locked.sri_state = NULL;
+ sl_unlock(&sri_state_lock_instance);
+}
+
/** Other world SVE context (accessed from other_world_loop). */
struct sve_context_t sve_context[MAX_CPUS];
@@ -1046,3 +1081,93 @@
return ffa_ret;
}
+
+static void sri_state_set(struct sri_state_locked sri_state_locked,
+ enum plat_ffa_sri_state state)
+{
+ CHECK(sri_state_locked.sri_state != NULL &&
+ sri_state_locked.sri_state == &sri_state);
+
+ switch (*(sri_state_locked.sri_state)) {
+ case TRIGGERED:
+ /*
+ * If the SRI has already been triggered, ignore a request to
+ * delay it: the SRI pending handling already covers the newly
+ * set notification.
+ */
+ if (state == DELAYED) {
+ break;
+ }
+ /* Fall through. */
+ case HANDLED:
+ case DELAYED:
+ *(sri_state_locked.sri_state) = state;
+ break;
+ default:
+ panic("Invalid SRI state\n");
+ }
+}
+
+void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
+{
+ struct sri_state_locked sri_state_locked = sri_state_lock();
+
+ sri_state_set(sri_state_locked, state);
+ sri_state_unlock(sri_state_locked);
+}
+
+static void plat_ffa_send_schedule_receiver_interrupt(struct cpu *cpu)
+{
+ dlog_verbose("Setting Schedule Receiver SGI %d on core: %d\n",
+ FFA_SCHEDULE_RECEIVER_INTERRUPT_ID, cpu_index(cpu));
+
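+ /* Target only the current core: bit n of the target list is core n. */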
+ plat_interrupts_send_sgi(FFA_SCHEDULE_RECEIVER_INTERRUPT_ID, false,
+ (1 << cpu_index(cpu)), false);
+}
+
+void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+{
+ struct sri_state_locked sri_state_locked = sri_state_lock();
+
+ if (*(sri_state_locked.sri_state) == DELAYED) {
+ dlog_verbose("Triggering delayed SRI!\n");
+ plat_ffa_send_schedule_receiver_interrupt(cpu);
+ sri_state_set(sri_state_locked, TRIGGERED);
+ }
+
+ sri_state_unlock(sri_state_locked);
+}
+
+void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+{
+ struct sri_state_locked sri_state_locked = sri_state_lock();
+
+ if (*(sri_state_locked.sri_state) == HANDLED) {
+ /*
+ * If the flag to delay the SRI isn't set, trigger the SRI so that
+ * the receiver scheduler is aware there are pending notifications.
+ */
+ dlog_verbose("Triggering not delayed SRI!\n");
+ plat_ffa_send_schedule_receiver_interrupt(cpu);
+ sri_state_set(sri_state_locked, TRIGGERED);
+ }
+
+ sri_state_unlock(sri_state_locked);
+}
+
+void plat_ffa_sri_init(struct cpu *cpu)
+{
+ struct interrupt_descriptor sri_desc;
+
+ /* TODO: when supported, make the interrupt driver use cpu structure. */
+ (void)cpu;
+
+ interrupt_desc_set_id(&sri_desc, FFA_SCHEDULE_RECEIVER_INTERRUPT_ID);
+ interrupt_desc_set_priority(&sri_desc, SRI_PRIORITY);
+ interrupt_desc_set_valid(&sri_desc, true);
+
+ /* Configure the interrupt as a Non-Secure SGI. */
+ interrupt_desc_set_type_config_sec_state(&sri_desc,
+ INT_DESC_TYPE_SGI << 2);
+
+ plat_interrupts_configure_interrupt(sri_desc);
+}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 8736afa..04262bb 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -256,3 +256,23 @@
(void)lists_count;
(void)ids_count_max;
}
+
+void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
+{
+ (void)state;
+}
+
+void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+{
+ (void)cpu;
+}
+
+void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+{
+ (void)cpu;
+}
+
+void plat_ffa_sri_init(struct cpu *cpu)
+{
+ (void)cpu;
+}
diff --git a/src/main.c b/src/main.c
index 408a2d1..239ca7a 100644
--- a/src/main.c
+++ b/src/main.c
@@ -6,9 +6,14 @@
* https://opensource.org/licenses/BSD-3-Clause.
*/
+#include "hf/arch/plat/ffa.h"
+
#include "hf/cpu.h"
+#include "hf/dlog.h"
#include "hf/vm.h"
+#include "vmapi/hf/ffa.h"
+
/**
* The entry point of CPUs when they are turned on. It is supposed to initialise
* all state and return the first vCPU to run.
@@ -31,5 +36,8 @@
/* Reset the registers to give a clean start for vCPU. */
vcpu_reset(vcpu);
+ /* Initialize the SRI for the running core. */
+ plat_ffa_sri_init(c);
+
return vcpu;
}