feat(notifications): schedule receiver interrupt

Configure the Schedule Receiver Interrupt (SRI) for each CPU, and send
the respective SGI to the NWd. If the flag
FFA_NOTIFICATIONS_FLAG_DELAY_SRI is set, the SGI is sent upon context
switch from the SWd to the NWd; otherwise it is sent immediately,
preempting the execution of the sender SP.
A state machine coordinates the handling and sending of the SRI.
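
For reference, a minimal sketch of the resulting state machine (state
names are from this patch; the declaration order is illustrative, and
the transitions follow sri_state_set() in
src/arch/aarch64/plat/ffa/spmc.c):

    enum plat_ffa_sri_state {
            HANDLED,   /* Receiver scheduler retrieved notification info. */
            DELAYED,   /* SRI pending; send on next SWd to NWd switch. */
            TRIGGERED, /* SGI sent; awaiting FFA_NOTIFICATION_INFO_GET. */
    };

    /*
     * HANDLED   -> TRIGGERED : notifications set without the delay flag.
     * HANDLED   -> DELAYED   : FFA_NOTIFICATIONS_FLAG_DELAY_SRI was set.
     * DELAYED   -> TRIGGERED : SGI sent on SWd to NWd context switch.
     * TRIGGERED -> HANDLED   : FFA_NOTIFICATION_INFO_GET retrieved info.
     * TRIGGERED -> DELAYED   : ignored; the SRI is already pending.
     */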

Change-Id: If05a6535094f5da7189d8dbb55b04e7c1a1f80d7
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index cb27a58..54bde1d 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2758,11 +2758,6 @@
 	bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
 	ffa_vcpu_index_t vcpu_id = (uint16_t)(flags >> 16);
 
-	/*
-	 * TODO: cater for the delay_schedule_receiver flag when dealing with
-	 * schedule receiver interrupt.
-	 */
-
 	if (!plat_ffa_is_notification_set_valid(current, sender_vm_id,
 						receiver_vm_id)) {
 		dlog_verbose("Invalid use of notifications set interface.\n");
@@ -2819,8 +2814,19 @@
 			     notifications, vcpu_id, is_per_vcpu);
 	dlog_verbose("Set the notifications: %x.\n", notifications);
 
-	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
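+	/*
+	 * Trigger the SRI now unless the sender asked to delay it until the
+	 * next context switch from the SWd to the NWd.
+	 */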
+	if ((FFA_NOTIFICATIONS_FLAG_DELAY_SRI & flags) == 0) {
+		dlog_verbose("SRI was NOT delayed. vcpu: %u!\n",
+			     vcpu_index(current));
+		plat_ffa_sri_trigger_not_delayed(current->cpu);
+	} else {
+		plat_ffa_sri_state_set(DELAYED);
+	}
 
+	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
 out:
 	vm_unlock(&receiver_locked);
 
@@ -2957,6 +2963,7 @@
 	uint32_t lists_count = 0;
 	uint32_t ids_count = 0;
 	bool list_is_full = false;
+	struct ffa_value result;
 
 	/*
 	 * This interface can only be called at NS virtual/physical FF-A
@@ -3004,11 +3011,18 @@
 	if (ids_count == 0) {
 		dlog_verbose(
 			"Notification info get has no data to retrieve.\n");
-		return ffa_error(FFA_NO_DATA);
+		result = ffa_error(FFA_NO_DATA);
+	} else {
+		result = api_ffa_notification_info_get_success_return(
+			ids, ids_count, lists_sizes, lists_count, list_is_full);
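+		/*
+		 * The receiver scheduler has retrieved the pending
+		 * notification info: consider the SRI handled.
+		 */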
+		plat_ffa_sri_state_set(HANDLED);
 	}
 
-	return api_ffa_notification_info_get_success_return(
-		ids, ids_count, lists_sizes, lists_count, list_is_full);
+	return result;
 }
 
 struct ffa_value api_ffa_mem_perm_get(vaddr_t base_addr, struct vcpu *current)
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index d038ef3..a5f32cf 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -726,6 +726,16 @@
 #endif
 
 	if (ffa_handler(&args, vcpu, next)) {
+#if SECURE_WORLD == 1
+		/*
+		 * If giving back execution to the NWd, check if the Schedule
+		 * Receiver Interrupt has been delayed, and trigger it if so.
+		 */
+		if ((*next != NULL && (*next)->vm->id == HF_OTHER_WORLD_ID) ||
+		    (*next == NULL && vcpu->vm->id == HF_OTHER_WORLD_ID)) {
+			plat_ffa_sri_trigger_if_delayed(vcpu->cpu);
+		}
+#endif
 		arch_regs_set_retval(&vcpu->regs, args);
 		vcpu_update_virtual_interrupts(*next);
 		return true;
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index baeee11..5fdc54f 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -285,3 +285,8 @@
 	(void)lists_count;
 	(void)ids_count_max;
 }
+
+void plat_ffa_sri_init(struct cpu *cpu)
+{
+	(void)cpu;
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index dbc061b..6a2cb4f 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -6,6 +6,7 @@
  * https://opensource.org/licenses/BSD-3-Clause.
  */
 
+#include "hf/arch/barriers.h"
 #include "hf/arch/ffa.h"
 #include "hf/arch/other_world.h"
 #include "hf/arch/plat/ffa.h"
@@ -17,6 +18,7 @@
 #include "hf/vcpu.h"
 #include "hf/vm.h"
 
+#include "msr.h"
 #include "smc.h"
 #include "sysregs.h"
 
@@ -513,3 +515,29 @@
 
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
+
+void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
+{
+	(void)state;
+}
+
+/**
+ * A Hypervisor should send the SRI to the Primary Endpoint. This is left
+ * unimplemented: the Hypervisor only matters to us as test infrastructure
+ * encompassing the NWd, and we are not interested in testing the flow of
+ * notifications between VMs alone.
+ */
+void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+{
+	(void)cpu;
+}
+
+void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+{
+	(void)cpu;
+}
+
+void plat_ffa_sri_init(struct cpu *cpu)
+{
+	(void)cpu;
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 23cc039..0444b30 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -7,6 +7,8 @@
  */
 
 #include "hf/arch/ffa.h"
+#include "hf/arch/mmu.h"
+#include "hf/arch/other_world.h"
 #include "hf/arch/plat/ffa.h"
 #include "hf/arch/sve.h"
 
@@ -14,15 +16,48 @@
 #include "hf/dlog.h"
 #include "hf/ffa.h"
 #include "hf/ffa_internal.h"
+#include "hf/interrupt_desc.h"
 #include "hf/plat/interrupts.h"
 #include "hf/std.h"
+#include "hf/vcpu.h"
 #include "hf/vm.h"
 
 #include "vmapi/hf/ffa.h"
 
+#include "msr.h"
 #include "smc.h"
 #include "sysregs.h"
 
+/** Interrupt priority for the Schedule Receiver Interrupt. */
+#define SRI_PRIORITY 0x10U
+
+/** Encapsulates `sri_state` while the `sri_state_lock` is held. */
+struct sri_state_locked {
+	enum plat_ffa_sri_state *sri_state;
+};
+
+/** Tracks the global state of SRI handling. */
+static enum plat_ffa_sri_state sri_state = HANDLED;
+
+/** Lock to guard access to `sri_state`. */
+static struct spinlock sri_state_lock_instance = SPINLOCK_INIT;
+
+/** Acquires the lock guarding `sri_state`. */
+static struct sri_state_locked sri_state_lock(void)
+{
+	sl_lock(&sri_state_lock_instance);
+
+	return (struct sri_state_locked){.sri_state = &sri_state};
+}
+
+/** Releases the lock guarding `sri_state`. */
+static void sri_state_unlock(struct sri_state_locked sri_state_locked)
+{
+	CHECK(sri_state_locked.sri_state == &sri_state);
+	sri_state_locked.sri_state = NULL;
+	sl_unlock(&sri_state_lock_instance);
+}
+
 /** Other world SVE context (accessed from other_world_loop). */
 struct sve_context_t sve_context[MAX_CPUS];
 
@@ -1046,3 +1081,94 @@
 
 	return ffa_ret;
 }
+
+static void sri_state_set(struct sri_state_locked sri_state_locked,
+			  enum plat_ffa_sri_state state)
+{
+	CHECK(sri_state_locked.sri_state != NULL &&
+	      sri_state_locked.sri_state == &sri_state);
+
+	switch (*(sri_state_locked.sri_state)) {
+	case TRIGGERED:
+		/*
+		 * The SRI has already been sent and is pending handling;
+		 * a request to delay it is redundant, so ignore it.
+		 */
+		if (state == DELAYED) {
+			break;
+		}
+		/* Fall through. */
+	case HANDLED:
+	case DELAYED:
+		*(sri_state_locked.sri_state) = state;
+		break;
+	default:
+		panic("Invalid SRI state\n");
+	}
+}
+
+void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
+{
+	struct sri_state_locked sri_state_locked = sri_state_lock();
+
+	sri_state_set(sri_state_locked, state);
+	sri_state_unlock(sri_state_locked);
+}
+
+static void plat_ffa_send_schedule_receiver_interrupt(struct cpu *cpu)
+{
+	dlog_verbose("Setting Schedule Receiver SGI %d on core: %d\n",
+		     FFA_SCHEDULE_RECEIVER_INTERRUPT_ID, cpu_index(cpu));
+
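+	/* Send the SGI to the current core only. */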
+	plat_interrupts_send_sgi(FFA_SCHEDULE_RECEIVER_INTERRUPT_ID, false,
+				 (1 << cpu_index(cpu)), false);
+}
+
+void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+{
+	struct sri_state_locked sri_state_locked = sri_state_lock();
+
+	if (*(sri_state_locked.sri_state) == DELAYED) {
+		dlog_verbose("Triggering delayed SRI!\n");
+		plat_ffa_send_schedule_receiver_interrupt(cpu);
+		sri_state_set(sri_state_locked, TRIGGERED);
+	}
+
+	sri_state_unlock(sri_state_locked);
+}
+
+void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+{
+	struct sri_state_locked sri_state_locked = sri_state_lock();
+
+	if (*(sri_state_locked.sri_state) == HANDLED) {
+		/*
+		 * If flag to delay SRI isn't set, trigger SRI such that the
+		 * receiver scheduler is aware there are pending notifications.
+		 */
+		dlog_verbose("Triggering not delayed SRI!\n");
+		plat_ffa_send_schedule_receiver_interrupt(cpu);
+		sri_state_set(sri_state_locked, TRIGGERED);
+	}
+
+	sri_state_unlock(sri_state_locked);
+}
+
+void plat_ffa_sri_init(struct cpu *cpu)
+{
+	struct interrupt_descriptor sri_desc;
+
+	/* TODO: when supported, make the interrupt driver use cpu structure. */
+	(void)cpu;
+
+	interrupt_desc_set_id(&sri_desc, FFA_SCHEDULE_RECEIVER_INTERRUPT_ID);
+	interrupt_desc_set_priority(&sri_desc, SRI_PRIORITY);
+	interrupt_desc_set_valid(&sri_desc, true);
+
+	/* Configure Interrupt as Non-Secure. */
+	interrupt_desc_set_type_config_sec_state(&sri_desc,
+						 INT_DESC_TYPE_SGI << 2);
+
+	plat_interrupts_configure_interrupt(sri_desc);
+}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 8736afa..04262bb 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -256,3 +256,23 @@
 	(void)lists_count;
 	(void)ids_count_max;
 }
+
+void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
+{
+	(void)state;
+}
+
+void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+{
+	(void)cpu;
+}
+
+void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+{
+	(void)cpu;
+}
+
+void plat_ffa_sri_init(struct cpu *cpu)
+{
+	(void)cpu;
+}
diff --git a/src/main.c b/src/main.c
index 408a2d1..239ca7a 100644
--- a/src/main.c
+++ b/src/main.c
@@ -6,9 +6,14 @@
  * https://opensource.org/licenses/BSD-3-Clause.
  */
 
+#include "hf/arch/plat/ffa.h"
+
 #include "hf/cpu.h"
+#include "hf/dlog.h"
 #include "hf/vm.h"
 
+#include "vmapi/hf/ffa.h"
+
 /**
  * The entry point of CPUs when they are turned on. It is supposed to initialise
  * all state and return the first vCPU to run.
@@ -31,5 +36,8 @@
 	/* Reset the registers to give a clean start for vCPU. */
 	vcpu_reset(vcpu);
 
+	/* Initialize SRI for running core. */
+	plat_ffa_sri_init(c);
+
 	return vcpu;
 }