refactor(notifications): drop the SRI state

The SPMC defined a state machine to track the SRI from
when it was signaled to the normal world until it was
handled. It was considered handled when the SPMC
received an FFA_NOTIFICATION_INFO_GET ABI call, as that
call is meant to be made whilst handling the SRI.

The intent of the state machine was to avoid having
multiple interrupts in the NWd, due to the SRI SGI
being sent concurrently in two different CPUs.

The aforementioned state machine is dropped in this patch,
given that the worst that could happen in the described
scenario is that the last call to FFA_NOTIFICATION_INFO_GET
receives an empty list of partitions in need of CPU cycles.

Handling multiple SRIs in the NWd is preferable to
missing an SRI that should have been sent.

This change also simplifies the implementation.

BREAKING: FF-A driver may need adjustment to cope with
multiple SRI interrupts firing at different CPUs.

Change-Id: I258ac9d303306b21c0341efbadb4fc8d3b7104a4
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/inc/hf/arch/plat/ffa.h b/inc/hf/arch/plat/ffa.h
index 6735907..5ded40e 100644
--- a/inc/hf/arch/plat/ffa.h
+++ b/inc/hf/arch/plat/ffa.h
@@ -15,44 +15,6 @@
 #include "hf/vcpu.h"
 #include "hf/vm.h"
 
-/**
- * The following enum relates to a state machine to guide the handling of the
- * Scheduler Receiver Interrupt.
- * The SRI is used to signal the receiver scheduler that there are pending
- * notifications for the receiver, and it is sent when there is a valid call to
- * FFA_NOTIFICATION_SET.
- * The FFA_NOTIFICATION_INFO_GET interface must be called in the SRI handler,
- * after which the FF-A driver should process the returned list, and request
- * the receiver scheduler to give the receiver CPU cycles to process the
- * notification.
- * The use of the following state machine allows for synchronized sending
- * and handling of the SRI, as well as avoiding the occurrence of spurious
- * SRI. A spurious SRI would be one such that upon handling a call to
- * FFA_NOTIFICATION_INFO_GET would return error FFA_NO_DATA, which is plausible
- * in an MP system.
- * The state machine also aims at resolving the delay of the SRI by setting
- * flag FFA_NOTIFICATIONS_FLAG_DELAY_SRI in the arguments of the set call. By
- * delaying, the SRI is sent in context switching to the primary endpoint.
- * The SPMC is implemented under the assumption the receiver scheduler is a
- * NWd endpoint, hence the SRI is triggered at the world switch.
- * If concurrently another notification is set that requires immediate action,
- * the SRI is triggered immediately within that same execution context.
- *
- * HANDLED is the initial state, and means a new SRI can be sent. The following
- * state transitions are possible:
- * * HANDLED => DELAYED: Setting notification, and requesting SRI delay.
- * * HANDLED => TRIGGERED: Setting notification, and not requesting SRI delay.
- * * DELAYED => TRIGGERED: SRI was delayed, and the context switch to the
- * receiver scheduler is being done.
- * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
- * * TRIGGERED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
- */
-enum plat_ffa_sri_state {
-	HANDLED = 0,
-	DELAYED,
-	TRIGGERED,
-};
-
 /** Returns the SPMC ID. */
 struct ffa_value plat_ffa_spmc_id_get(void);
 
@@ -199,9 +161,6 @@
 					uint32_t *lists_count,
 					uint32_t ids_count_max);
 
-/** Helper to set SRI current state. */
-void plat_ffa_sri_state_set(enum plat_ffa_sri_state state);
-
 /**
  * Helper to send SRI and safely update `ffa_sri_state`, if there has been
  * a call to FFA_NOTIFICATION_SET, and the SRI has been delayed.
@@ -216,6 +175,12 @@
 void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);
 
 /**
+ * Track that in current CPU there was a notification set with delay SRI
+ * flag.
+ */
+void plat_ffa_sri_set_delayed(struct cpu *cpu);
+
+/**
  * Initialize Schedule Receiver Interrupts needed in the context of
  * notifications support.
  */
diff --git a/inc/hf/cpu.h b/inc/hf/cpu.h
index a80431d..ac1f7d5 100644
--- a/inc/hf/cpu.h
+++ b/inc/hf/cpu.h
@@ -28,6 +28,9 @@
 
 	/** Determines whether the CPU is currently on. */
 	bool is_on;
+
+	/* In case there is a pending SRI for the NWd. */
+	bool is_sri_delayed;
 };
 
 void cpu_module_init(const cpu_id_t *cpu_ids, size_t count);
diff --git a/src/api.c b/src/api.c
index 0d13fe5..ccd0021 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2138,7 +2138,7 @@
 				     vcpu_index(current));
 			plat_ffa_sri_trigger_not_delayed(current->cpu);
 		} else {
-			plat_ffa_sri_state_set(DELAYED);
+			plat_ffa_sri_set_delayed(current->cpu);
 		}
 	}
 
@@ -4426,7 +4426,7 @@
 			     vcpu_index(current));
 		plat_ffa_sri_trigger_not_delayed(current->cpu);
 	} else {
-		plat_ffa_sri_state_set(DELAYED);
+		plat_ffa_sri_set_delayed(current->cpu);
 	}
 
 	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
@@ -4537,14 +4537,6 @@
 	ret = api_ffa_notification_get_success_return(
 		sp_notifications, vm_notifications, framework_notifications);
 
-	/*
-	 * If there are no more pending notifications, change `sri_state` to
-	 * handled.
-	 */
-	if (vm_is_notifications_pending_count_zero()) {
-		plat_ffa_sri_state_set(HANDLED);
-	}
-
 	if (!receiver_locked.vm->el0_partition &&
 	    !vm_are_global_notifications_pending(receiver_locked)) {
 		vm_notifications_set_npi_injected(receiver_locked, false);
@@ -4663,8 +4655,6 @@
 			ids, ids_count, lists_sizes, lists_count);
 	}
 
-	plat_ffa_sri_state_set(HANDLED);
-
 	return result;
 }
 
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index e06c4a3..a7c1ac4 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -846,11 +846,6 @@
 	CHECK(false);
 }
 
-void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
-{
-	(void)state;
-}
-
 /**
  * An Hypervisor should send the SRI to the Primary Endpoint. Not implemented
  * as Hypervisor is only interesting for us for the sake of having a test
@@ -867,6 +862,15 @@
 	(void)cpu;
 }
 
+/**
+ * Track that in current CPU there was a notification set with delay SRI
+ * flag.
+ */
+void plat_ffa_sri_set_delayed(struct cpu *cpu)
+{
+	(void)cpu;
+}
+
 bool plat_ffa_inject_notification_pending_interrupt(
 	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
 	struct vm_locked receiver_locked)
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 371ce4d..b998476 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -36,33 +36,6 @@
 /** Interrupt priority for the Schedule Receiver Interrupt. */
 #define SRI_PRIORITY 0x10U
 
-/** Encapsulates `sri_state` while the `sri_state_lock` is held. */
-struct sri_state_locked {
-	enum plat_ffa_sri_state *sri_state;
-};
-
-/** To globally keep track of the SRI handling. */
-static enum plat_ffa_sri_state sri_state = HANDLED;
-
-/** Lock to guard access to `sri_state`. */
-static struct spinlock sri_state_lock_instance = SPINLOCK_INIT;
-
-/** Locks `sri_state` guarding lock. */
-static struct sri_state_locked sri_state_lock(void)
-{
-	sl_lock(&sri_state_lock_instance);
-
-	return (struct sri_state_locked){.sri_state = &sri_state};
-}
-
-/** Unlocks `sri_state` guarding lock. */
-void sri_state_unlock(struct sri_state_locked sri_state_locked)
-{
-	assert(sri_state_locked.sri_state == &sri_state);
-	sri_state_locked.sri_state = NULL;
-	sl_unlock(&sri_state_lock_instance);
-}
-
 /**
  * The SPMC needs to keep track of some information about NWd VMs.
  * For the time being, only the notifications state structures.
@@ -1893,39 +1866,6 @@
 	return ffa_ret;
 }
 
-static void sri_state_set(struct sri_state_locked sri_state_locked,
-			  enum plat_ffa_sri_state state)
-{
-	assert(sri_state_locked.sri_state != NULL &&
-	       sri_state_locked.sri_state == &sri_state);
-
-	switch (*(sri_state_locked.sri_state)) {
-	case TRIGGERED:
-		/*
-		 * If flag to delay SRI is set, and SRI hasn't been
-		 * triggered state to delayed such that it is triggered
-		 * at context switch to the receiver scheduler.
-		 */
-		if (state == DELAYED) {
-			break;
-		}
-	case HANDLED:
-	case DELAYED:
-		*(sri_state_locked.sri_state) = state;
-		break;
-	default:
-		panic("Invalid SRI state\n");
-	}
-}
-
-void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
-{
-	struct sri_state_locked sri_state_locked = sri_state_lock();
-
-	sri_state_set(sri_state_locked, state);
-	sri_state_unlock(sri_state_locked);
-}
-
 static void plat_ffa_send_schedule_receiver_interrupt(struct cpu *cpu)
 {
 	dlog_verbose("Setting Schedule Receiver SGI %u on core: %zu\n",
@@ -1934,32 +1874,41 @@
 	plat_interrupts_send_sgi(HF_SCHEDULE_RECEIVER_INTID, cpu, false);
 }
 
+static void plat_ffa_sri_set_delayed_internal(struct cpu *cpu, bool delayed)
+{
+	assert(cpu != NULL);
+	cpu->is_sri_delayed = delayed;
+}
+
+void plat_ffa_sri_set_delayed(struct cpu *cpu)
+{
+	plat_ffa_sri_set_delayed_internal(cpu, true);
+}
+
+static bool plat_ffa_is_sri_delayed(struct cpu *cpu)
+{
+	assert(cpu != NULL);
+	return cpu->is_sri_delayed;
+}
+
 void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
 {
-	struct sri_state_locked sri_state_locked = sri_state_lock();
+	assert(cpu != NULL);
 
-	if (*(sri_state_locked.sri_state) == DELAYED) {
+	if (plat_ffa_is_sri_delayed(cpu)) {
 		plat_ffa_send_schedule_receiver_interrupt(cpu);
-		sri_state_set(sri_state_locked, TRIGGERED);
+		plat_ffa_sri_set_delayed_internal(cpu, false);
 	}
-
-	sri_state_unlock(sri_state_locked);
 }
 
 void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
 {
-	struct sri_state_locked sri_state_locked = sri_state_lock();
-
-	if (*(sri_state_locked.sri_state) == HANDLED) {
-		/*
-		 * If flag to delay SRI isn't set, trigger SRI such that the
-		 * receiver scheduler is aware there are pending notifications.
-		 */
-		plat_ffa_send_schedule_receiver_interrupt(cpu);
-		sri_state_set(sri_state_locked, TRIGGERED);
-	}
-
-	sri_state_unlock(sri_state_locked);
+	/*
+	 * If flag to delay SRI isn't set, trigger SRI such that the
+	 * receiver scheduler is aware there are pending notifications.
+	 */
+	plat_ffa_send_schedule_receiver_interrupt(cpu);
+	plat_ffa_sri_set_delayed_internal(cpu, false);
 }
 
 void plat_ffa_sri_init(struct cpu *cpu)
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 4e3b5b2..9ddef6d 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -352,11 +352,6 @@
 	(void)ids_count_max;
 }
 
-void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
-{
-	(void)state;
-}
-
 void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
 {
 	(void)cpu;
@@ -367,6 +362,11 @@
 	(void)cpu;
 }
 
+void plat_ffa_sri_set_delayed(struct cpu *cpu)
+{
+	(void)cpu;
+}
+
 bool plat_ffa_inject_notification_pending_interrupt(
 	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
 	struct vm_locked receiver_locked)