feat(ipi): introduce IPI paravirtualised interface

Interprocessor Interrupts (IPIs) allow an SP to send an interrupt to
itself on another CPU. This patch starts the implementation of this
feature and enables it for the case where the SP is in the RUNNING
state on the target_vcpu.

Signed-off-by: Daniel Boulby <daniel.boulby@arm.com>
Change-Id: Idd0e1a5863730ae0f169bd0f56ac3abcd2916870
diff --git a/src/BUILD.gn b/src/BUILD.gn
index 69325f2..92f4be8 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -28,6 +28,7 @@
     "api.c",
     "cpio.c",
     "ffa_memory.c",
+    "hf_ipi.c",
     "init.c",
     "load.c",
     "main.c",
diff --git a/src/api.c b/src/api.c
index 5bac913..ed7f772 100644
--- a/src/api.c
+++ b/src/api.c
@@ -23,6 +23,7 @@
 #include "hf/ffa_internal.h"
 #include "hf/ffa_memory.h"
 #include "hf/ffa_v1_0.h"
+#include "hf/hf_ipi.h"
 #include "hf/mm.h"
 #include "hf/plat/console.h"
 #include "hf/plat/interrupts.h"
@@ -2250,6 +2251,7 @@
  * - NPI
  * - ME
  * - Virtual Timer.
+ * - IPI
  *
  * These are VIs with no expected interrupt descriptor.
  */
@@ -2257,7 +2259,7 @@
 {
 	return intid == HF_NOTIFICATION_PENDING_INTID ||
 	       intid == HF_MANAGED_EXIT_INTID ||
-	       intid == HF_VIRTUAL_TIMER_INTID;
+	       intid == HF_VIRTUAL_TIMER_INTID || intid == HF_IPI_INTID;
 }
 
 /**
@@ -4851,3 +4853,27 @@
 	vcpu_unlock(&vcpu_locked);
 	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
+
+/**
+ * Send an IPI interrupt to a target vcpu belonging to the
+ * sender that isn't itself.
+ */
+uint64_t api_hf_interrupt_send_ipi(uint32_t target_vcpu_id,
+				   struct vcpu *current)
+{
+	struct vm *vm = current->vm;
+	ffa_vcpu_index_t target_vcpu_index = vcpu_id_to_index(target_vcpu_id);
+
+	if (target_vcpu_index >= vm->vcpu_count ||
+	    target_vcpu_index == cpu_index(current->cpu)) {
+		dlog_verbose("Invalid vCPU %d for IPI.\n", target_vcpu_id);
+		return -1;
+	}
+
+	dlog_verbose("Injecting IPI to target vCPU%d for %#x\n", target_vcpu_id,
+		     vm->id);
+
+	hf_ipi_send_interrupt(vm, target_vcpu_index);
+
+	return 0;
+}
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 0244eae..107cdab 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -23,6 +23,7 @@
 #include "hf/dlog.h"
 #include "hf/ffa.h"
 #include "hf/ffa_internal.h"
+#include "hf/hf_ipi.h"
 #include "hf/panic.h"
 #include "hf/plat/interrupts.h"
 #include "hf/vm.h"
@@ -1086,6 +1087,10 @@
 		vcpu->regs.r[0] = plat_ffa_interrupt_reconfigure(
 			args.arg1, args.arg2, args.arg3, vcpu);
 		break;
+
+	case HF_INTERRUPT_SEND_IPI:
+		vcpu->regs.r[0] = api_hf_interrupt_send_ipi(args.arg1, vcpu);
+		break;
 #endif
 	case HF_INTERRUPT_ENABLE:
 		vcpu->regs.r[0] = api_interrupt_enable(args.arg1, args.arg2,
diff --git a/src/arch/aarch64/hypervisor/psci_handler.c b/src/arch/aarch64/hypervisor/psci_handler.c
index 5c0ce87..4702301 100644
--- a/src/arch/aarch64/hypervisor/psci_handler.c
+++ b/src/arch/aarch64/hypervisor/psci_handler.c
@@ -215,16 +215,6 @@
 }
 
 /**
- * Convert a PSCI CPU / affinity ID for a secondary VM to the corresponding vCPU
- * index.
- */
-ffa_vcpu_index_t vcpu_id_to_index(cpu_id_t vcpu_id)
-{
-	/* For now we use indices as IDs for the purposes of PSCI. */
-	return vcpu_id;
-}
-
-/**
  * Handles PSCI requests received via HVC or SMC instructions from a secondary
  * VM.
  *
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 807d5a5..6eeb82b 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -20,6 +20,7 @@
 #include "hf/ffa.h"
 #include "hf/ffa_internal.h"
 #include "hf/ffa_memory.h"
+#include "hf/hf_ipi.h"
 #include "hf/interrupt_desc.h"
 #include "hf/plat/interrupts.h"
 #include "hf/std.h"
@@ -1448,38 +1449,49 @@
 	}
 }
 
-static struct vcpu *plat_ffa_find_target_vcpu(struct vcpu *current,
-					      uint32_t interrupt_id)
+static struct vcpu *plat_ffa_find_target_vcpu_secure_interrupt(
+	struct vcpu *current, uint32_t interrupt_id)
 {
-	bool target_vm_found = false;
-	struct vm *vm;
-	struct vcpu *target_vcpu;
-	struct interrupt_descriptor int_desc;
-
 	/*
-	 * Find which VM/SP owns this interrupt. We then find the corresponding
-	 * vCPU context for this CPU.
+	 * Find which VM/SP owns this interrupt. We then find the
+	 * corresponding vCPU context for this CPU.
 	 */
 	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
-		vm = vm_find_index(index);
+		struct vm *vm = vm_find_index(index);
 
 		for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
-			int_desc = vm->interrupt_desc[j];
+			struct interrupt_descriptor int_desc =
+				vm->interrupt_desc[j];
 
-			/* Interrupt descriptors are populated contiguously. */
+			/*
+			 * Interrupt descriptors are populated
+			 * contiguously.
+			 */
 			if (!int_desc.valid) {
 				break;
 			}
 			if (int_desc.interrupt_id == interrupt_id) {
-				target_vm_found = true;
-				goto out;
+				return api_ffa_get_vm_vcpu(vm, current);
 			}
 		}
 	}
-out:
-	CHECK(target_vm_found);
 
-	target_vcpu = api_ffa_get_vm_vcpu(vm, current);
+	return NULL;
+}
+
+static struct vcpu *plat_ffa_find_target_vcpu(struct vcpu *current,
+					      uint32_t interrupt_id)
+{
+	struct vcpu *target_vcpu;
+
+	switch (interrupt_id) {
+	case HF_IPI_INTID:
+		target_vcpu = hf_ipi_get_pending_target_vcpu(current->cpu);
+		break;
+	default:
+		target_vcpu = plat_ffa_find_target_vcpu_secure_interrupt(
+			current, interrupt_id);
+	}
 
 	/* The target vCPU for a secure interrupt cannot be NULL. */
 	CHECK(target_vcpu != NULL);
@@ -1866,26 +1878,38 @@
 		/* Resume current vCPU. */
 		*next = NULL;
 	} else {
-		/*
-		 * Do not currently support nested interrupts as such, masking
-		 * interrupts.
-		 */
-		plat_ffa_mask_interrupts(target_vcpu_locked);
-
 		/* Set the interrupt pending in the target vCPU. */
 		vcpu_interrupt_inject(target_vcpu_locked, intid);
 
-		/*
-		 * Either invoke the handler related to partitions from S-EL0 or
-		 * from S-EL1.
-		 */
-		*next = target_vcpu_locked.vcpu->vm->el0_partition
-				? plat_ffa_signal_secure_interrupt_sel0(
-					  current_locked, target_vcpu_locked,
-					  intid)
-				: plat_ffa_signal_secure_interrupt_sel1(
-					  current_locked, target_vcpu_locked,
-					  intid);
+		switch (intid) {
+		case HF_IPI_INTID:
+			if (hf_ipi_handle(target_vcpu_locked)) {
+				*next = NULL;
+				break;
+			}
+			/*
+			 * Fall through in the case handling has not been fully
+			 * completed.
+			 */
+		default:
+			/*
+			 * Do not currently support nested interrupts as such,
+			 * masking interrupts.
+			 */
+			plat_ffa_mask_interrupts(target_vcpu_locked);
+
+			/*
+			 * Either invoke the handler related to partitions from
+			 * S-EL0 or from S-EL1.
+			 */
+			*next = target_vcpu_locked.vcpu->vm->el0_partition
+					? plat_ffa_signal_secure_interrupt_sel0(
+						  current_locked,
+						  target_vcpu_locked, intid)
+					: plat_ffa_signal_secure_interrupt_sel1(
+						  current_locked,
+						  target_vcpu_locked, intid);
+		}
 	}
 
 	if (target_vcpu_locked.vcpu != NULL) {
diff --git a/src/arch/aarch64/plat/psci/spmc.c b/src/arch/aarch64/plat/psci/spmc.c
index 7f9fb01..18f910b 100644
--- a/src/arch/aarch64/plat/psci/spmc.c
+++ b/src/arch/aarch64/plat/psci/spmc.c
@@ -13,6 +13,7 @@
 #include "hf/check.h"
 #include "hf/cpu.h"
 #include "hf/dlog.h"
+#include "hf/hf_ipi.h"
 #include "hf/vm.h"
 
 #include "vmapi/hf/types.h"
@@ -74,6 +75,8 @@
 
 	arch_cpu_init(c);
 
+	/* Initialize IPI for running core. */
+	hf_ipi_init_interrupt();
 	/* Initialize SRI for running core. */
 	plat_ffa_sri_init(c);
 
diff --git a/src/hf_ipi.c b/src/hf_ipi.c
new file mode 100644
index 0000000..b0221a1
--- /dev/null
+++ b/src/hf_ipi.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2024 The Hafnium Authors.
+ *
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file or at
+ * https://opensource.org/licenses/BSD-3-Clause.
+ */
+
+#include "hf/hf_ipi.h"
+
+#include "hf/cpu.h"
+#include "hf/plat/interrupts.h"
+
+/** Interrupt priority for Inter-Processor Interrupt. */
+#define IPI_PRIORITY 0x0U
+
+/**
+ * Initialize the IPI SGI.
+ */
+void hf_ipi_init_interrupt(void)
+{
+	/* Configure as a Secure SGI. */
+	struct interrupt_descriptor ipi_desc = {
+		.interrupt_id = HF_IPI_INTID,
+		.type = INT_DESC_TYPE_SGI,
+		.sec_state = INT_DESC_SEC_STATE_S,
+		.priority = IPI_PRIORITY,
+		.valid = true,
+	};
+
+	plat_interrupts_configure_interrupt(ipi_desc);
+}
+
+/**
+ * Returns the target_vcpu for the pending IPI on the current CPU and
+ * resets the item in the list to NULL to show it has been retrieved.
+ */
+struct vcpu *hf_ipi_get_pending_target_vcpu(struct cpu *current)
+{
+	struct vcpu *ret = current->ipi_target_vcpu;
+
+	current->ipi_target_vcpu = NULL;
+	return ret;
+}
+
+/**
+ * Send and record the IPI for the target vCPU.
+ */
+void hf_ipi_send_interrupt(struct vm *vm, ffa_vcpu_index_t target_vcpu_index)
+{
+	struct vcpu *target_vcpu = vm_get_vcpu(vm, target_vcpu_index);
+	struct cpu *target_cpu = target_vcpu->cpu;
+
+	target_cpu->ipi_target_vcpu = target_vcpu;
+	plat_interrupts_send_sgi(HF_IPI_INTID, target_cpu, true);
+}
+
+/**
+ * IPI IRQ specific handling for the secure interrupt for each vCPU state:
+ *   - RUNNING: Continue secure interrupt handling as normal, injecting
+ *   a virtual interrupt to the vCPU.
+ *   - Other states are not currently supported so exit the handler.
+ * Returns True if the IPI SGI has been handled.
+ * False if further secure interrupt handling is required.
+ */
+bool hf_ipi_handle(struct vcpu_locked target_vcpu_locked)
+{
+	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
+
+	switch (target_vcpu->state) {
+	case VCPU_STATE_RUNNING:
+		return false;
+	default:
+		dlog_verbose(
+			"IPIs not currently supported for when the target_vcpu "
+			"is in the state %d\n",
+			target_vcpu->state);
+		/*
+		 * Mark the interrupt as completed so it can be signalled again.
+		 */
+		plat_interrupts_end_of_interrupt(HF_IPI_INTID);
+		return true;
+	}
+}