feat(ipi): enable multiple SPs to send IPIs to the same CPU
For each CPU, keep a list of the target vCPUs with pending IPIs, so
that if multiple SPs target vCPUs on the same physical CPU, all of the
target vCPUs receive the interrupt. When a physical IPI IRQ is
received, forward the IPI to the current vCPU first if it has a
pending IPI, then inject the interrupt into any other vCPUs with
pending IPIs on the CPU and send a single SRI for them all.
Signed-off-by: Daniel Boulby <daniel.boulby@arm.com>
Change-Id: I47c496d79b47fde1e906b11028fd0637e6b1a011
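Example of the intended behaviour (an illustrative, hypothetical scenario,
not taken from a test in this change; sp_a and sp_b are placeholder
struct vm pointers for two SPs, each with vCPU0 pinned to CPU0):

    /* SP A's vCPU0 is RUNNING on CPU0; SP B's vCPU0 is WAITING. */
    hf_ipi_send_interrupt(sp_a, 0); /* vCPU enqueued on CPU0's list, SGI sent. */
    hf_ipi_send_interrupt(sp_b, 0); /* Second vCPU enqueued on the same list. */

    /*
     * When the physical HF_IPI_INTID arrives on CPU0:
     * - hf_ipi_get_pending_target_vcpu() returns SP A's vCPU first (the
     *   running vCPU is prioritised) and removes it from the list.
     * - hf_ipi_handle() injects the virtual IPI into SP A's vCPU and lets
     *   the default secure interrupt handling complete it, then drains the
     *   rest of the list: SP B's vCPU is WAITING, so its IPI is injected
     *   and a single delayed SRI is set, triggered on the next world
     *   switch so SP A's vCPU is not preempted before handling its own IPI.
     */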
diff --git a/inc/hf/cpu.h b/inc/hf/cpu.h
index 66cbadd..ad82e9f 100644
--- a/inc/hf/cpu.h
+++ b/inc/hf/cpu.h
@@ -34,14 +34,14 @@
/* In case there is a pending SRI for the NWd. */
bool is_sri_delayed;
- /* Track pending IPIs. */
- struct vcpu *ipi_target_vcpu;
-
/**
* A list of entries associated with vCPUs having pending timer
* deadline.
*/
struct timer_pending_vcpu_list pending_timer_vcpus_list;
+
+	/* Head of the list of vCPUs with pending IPIs. */
+ struct list_entry pending_ipis;
};
void cpu_module_init(const cpu_id_t *cpu_ids, size_t count);
diff --git a/inc/hf/hf_ipi.h b/inc/hf/hf_ipi.h
index dd40b84..55b0dc3 100644
--- a/inc/hf/hf_ipi.h
+++ b/inc/hf/hf_ipi.h
@@ -13,6 +13,6 @@
#include "hf/vm.h"
void hf_ipi_init_interrupt(void);
-struct vcpu *hf_ipi_get_pending_target_vcpu(struct cpu *current);
+struct vcpu *hf_ipi_get_pending_target_vcpu(struct vcpu *current);
void hf_ipi_send_interrupt(struct vm *vm, ffa_vcpu_index_t target_vcpu_index);
bool hf_ipi_handle(struct vcpu_locked target_vcpu_locked);
diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h
index a5ad7c7..5565810 100644
--- a/inc/hf/vcpu.h
+++ b/inc/hf/vcpu.h
@@ -227,6 +227,12 @@
* safeguarded from concurrent accesses.
*/
struct list_entry timer_node;
+
+ /*
+	 * List entry pointing to the next vCPU with an IPI pending on the
+	 * physical CPU this vCPU is pinned to.
+ */
+ struct list_entry ipi_list_node;
};
/** Encapsulates a vCPU whose lock is held. */
diff --git a/src/cpu.c b/src/cpu.c
index 1125b1e..698779d 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -15,6 +15,7 @@
#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"
+#include "hf/list.h"
#include "vmapi/hf/call.h"
@@ -114,6 +115,13 @@
* its `prev` and `next` fields point to itself.
*/
list_init(&(timer_list->root_entry));
+
+ /*
+ * Initialize the list of vCPUs with pending IPIs for
+	 * each CPU. The root entry is configured such that
+ * its `prev` and `next` fields point to itself.
+ */
+ list_init(&c->pending_ipis);
}
if (!found_boot_cpu) {
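The list handling in the hunks below leans on Hafnium's intrusive
list_entry primitives. A minimal sketch of the semantics they are assumed
to provide, using only the calls that appear in this change (`cpu` stands
for the owning struct cpu; in particular, the membership check in
hf_ipi_send_interrupt() assumes list_remove() leaves the removed node
self-referencing again):

    struct list_entry node;

    list_init(&node);
    /* node.prev == node.next == &node, so list_empty(&node) is true:
     * the node is not linked into any list. */

    list_prepend(&cpu->pending_ipis, &node);
    /* list_empty(&node) is now false: the vCPU owning this node is on
     * the CPU's pending-IPI list, so a repeated send is skipped. */

    list_remove(&node);
    /* Assumed to unlink the node and re-initialise it, so
     * list_empty(&node) is true again and a later
     * hf_ipi_send_interrupt() will re-enqueue the vCPU and raise a
     * fresh SGI. CONTAINER_OF(&node, struct vcpu, ipi_list_node)
     * recovers the owning vCPU from the removed entry. */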
diff --git a/src/ffa/spmc/interrupts.c b/src/ffa/spmc/interrupts.c
index e06d20a..53e9267 100644
--- a/src/ffa/spmc/interrupts.c
+++ b/src/ffa/spmc/interrupts.c
@@ -115,7 +115,11 @@
switch (interrupt_id) {
case HF_IPI_INTID:
- target_vcpu = hf_ipi_get_pending_target_vcpu(current->cpu);
+ /*
+ * Get the next vCPU with a pending IPI. If all vCPUs
+		 * have had their IPIs handled, this will return NULL.
+ */
+ target_vcpu = hf_ipi_get_pending_target_vcpu(current);
break;
case ARM_EL1_VIRT_TIMER_PHYS_INT:
/* Fall through */
@@ -125,10 +129,10 @@
default:
target_vcpu = plat_ffa_find_target_vcpu_secure_interrupt(
current, interrupt_id);
- }
- /* The target vCPU for a secure interrupt cannot be NULL. */
- CHECK(target_vcpu != NULL);
+ /* The target vCPU for a secure interrupt cannot be NULL. */
+ CHECK(target_vcpu != NULL);
+ }
return target_vcpu;
}
@@ -527,6 +531,12 @@
*/
plat_interrupts_end_of_interrupt(intid);
+ if (target_vcpu == NULL) {
+ /* No further handling required. Resume the current vCPU. */
+ *next = NULL;
+ return;
+ }
+
target_vm_locked = vm_lock(target_vcpu->vm);
if (target_vcpu == current) {
diff --git a/src/hf_ipi.c b/src/hf_ipi.c
index 568efde..c088f9d 100644
--- a/src/hf_ipi.c
+++ b/src/hf_ipi.c
@@ -34,22 +34,58 @@
}
/**
- * Returns the target_vcpu for the pending IPI on the current CPU and
- * resets the item in the list to NULL to show it has been retrieved.
+ * Returns the next target_vcpu with a pending IPI and removes it from
+ * the current CPU's list to show it has been retrieved.
+ * The running vCPU is prioritised to prevent it from being put into
+ * the PREEMPTED state before it has handled its IPI; this could happen
+ * in the case where a vCPU in the WAITING state also has a pending IPI.
+ * In the case of a spurious physical IPI, where the target vCPUs have
+ * already handled their pending IPIs, return NULL.
*/
-struct vcpu *hf_ipi_get_pending_target_vcpu(struct cpu *current)
+struct vcpu *hf_ipi_get_pending_target_vcpu(struct vcpu *current)
{
- struct vcpu *ret;
+ struct list_entry *list;
+ struct vcpu *target_vcpu;
-	sl_lock(&current->lock);
+ /* Lock the CPU the list belongs to. */
+	sl_lock(&current->cpu->lock);
- ret = current->ipi_target_vcpu;
+ /*
+	 * Check if the current vCPU has a pending IPI and,
+	 * if so, prioritise it.
+ */
+	if (!list_empty(&current->ipi_list_node)) {
+		list = &current->ipi_list_node;
+ } else {
+ /*
+		 * If the current vCPU doesn't have a pending IPI, check the
+		 * other vCPUs on the current CPU.
+ */
+		list = &current->cpu->pending_ipis;
- current->ipi_target_vcpu = NULL;
+ if (list_empty(list)) {
+ target_vcpu = NULL;
+ goto out;
+ }
-	sl_unlock(&current->lock);
+ /*
+	 * The list is circular; the root element does not belong to a
+	 * vCPU but is used to track whether the list is empty and, if not,
+	 * to point to the first vCPU with a pending IPI.
+ */
+ list = list->next;
+ }
- return ret;
+ /*
+	 * The next vCPU with a pending IPI has been retrieved to be handled,
+ * so remove it from the list.
+ */
+ list_remove(list);
+ target_vcpu = CONTAINER_OF(list, struct vcpu, ipi_list_node);
+
+out:
+	sl_unlock(&current->cpu->lock);
+ return target_vcpu;
}
/**
@@ -61,44 +97,179 @@
struct cpu *target_cpu = target_vcpu->cpu;
sl_lock(&target_cpu->lock);
+ /*
+	 * Since vCPUs are pinned to a physical CPU, they can only belong
+	 * to one list. Therefore check if the vCPU is already in a list;
+	 * if not, add it and send the IPI SGI.
+ */
+ if (list_empty(&target_vcpu->ipi_list_node)) {
+ list_prepend(&target_cpu->pending_ipis,
+ &target_vcpu->ipi_list_node);
- target_cpu->ipi_target_vcpu = target_vcpu;
+ plat_interrupts_send_sgi(HF_IPI_INTID, target_cpu, true);
+ }
sl_unlock(&target_cpu->lock);
- plat_interrupts_send_sgi(HF_IPI_INTID, target_cpu, true);
}
/**
- * IPI IRQ specific handling for the secure interrupt for each vCPU state:
- * - WAITING: Trigger an SRI so the NWd can schedule to target vCPU to run.
- * - RUNNING:
- * - PREEMPTED/BLOCKED: Return and allow the normal secure interrupt handling
- * to handle the interrupt as usual.
- * For all cases we must also mark the interrupt as complete from the
- * SPMC perspective.
- * Returns True if the IPI SGI has been handled.
- * False if further secure interrupt handling is required.
+ * Enum to track the next SRI action that should be performed for an IPI to
+ * a vCPU in the WAITING state.
+ */
+enum ipi_sri_action {
+ /* First entry into the handling function. */
+ IPI_SRI_ACTION_INIT,
+	/* For a waiting state, trigger an SRI that is not delayed. */
+ IPI_SRI_ACTION_NOT_DELAYED,
+ /*
+	 * For a waiting state, set a delayed SRI to prioritize a running
+	 * vCPU, preventing the running vCPU from being preempted.
+ */
+ IPI_SRI_ACTION_DELAYED,
+ /* SRI already set. */
+ IPI_SRI_ACTION_NONE,
+};
+
+/**
+ * IPI IRQ handling for each vCPU state. The ipi_sri_action is used to decide
+ * which SRI action to use when there is a vCPU in the WAITING state.
+ * The elements of the list of vCPUs with pending IPIs on the CPU are
+ * traversed and, depending on the state of each, the IPI specific handling
+ * is applied:
+ * - RUNNING: Set the ipi_sri_action to IPI_SRI_ACTION_DELAYED, so if an SRI
+ * is required for a different vCPU, the running (current) vCPU will still
+ *   handle its IPI first. Return false so that the normal secure interrupt handling
+ * continues.
+ * - WAITING: If the ipi_sri_action is IPI_SRI_ACTION_NONE, an SRI has either
+ *   already been triggered or set to delayed, so nothing more needs to be done.
+ * Otherwise:
+ * - If the running vCPU has a pending IPI, the ipi_sri_action will be
+ *     IPI_SRI_ACTION_DELAYED, so set the SRI to delayed. This means the SRI
+ *     will be triggered on the next world switch to the NWd and the running
+ *     vCPU will not be stopped before it has handled its IPI. Set the
+ *     ipi_sri_action to IPI_SRI_ACTION_NONE, as we only need to set the
+ *     SRI once.
+ * - If the running vCPU does not have a pending IPI, the ipi_sri_action
+ *     will be either IPI_SRI_ACTION_INIT, if it is the head of the list,
+ *     or IPI_SRI_ACTION_NOT_DELAYED. In these cases we want to trigger the
+ *     SRI immediately, so the NWd can schedule the target vCPU to handle
+ *     the IPI. Set the ipi_sri_action to IPI_SRI_ACTION_NONE as we only need
+ *     to trigger the SRI once.
+ * - PREEMPTED/BLOCKED:
+ *   - If it is the head of the list (indicated by
+ * IPI_SRI_ACTION_INIT), return false and allow normal secure interrupt
+ * handling to handle the interrupt as usual.
+ * - Otherwise queue the interrupt for the vCPU.
+ * Returns True if the IPI SGI has been fully handled.
+ *         False if further secure interrupt handling is required; this will
+ * only be the case for the target vCPU at the head of the pending IPI list,
+ * if it is in the RUNNING, PREEMPTED or BLOCKED state.
+ */
+static bool hf_ipi_handle_list_element(struct vcpu_locked target_vcpu_locked,
+ enum ipi_sri_action *ipi_sri_action)
+{
+ bool ret = true;
+ struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
+
+ assert(ipi_sri_action != NULL);
+
+ vcpu_interrupt_inject(target_vcpu_locked, HF_IPI_INTID);
+
+ switch (target_vcpu->state) {
+ case VCPU_STATE_RUNNING:
+ if (*ipi_sri_action != IPI_SRI_ACTION_INIT) {
+ panic("%s: If present the RUNNING vCPU should be the "
+ "first to be handled.\n",
+ __func__);
+ }
+ /*
+ * Any SRI should be delayed to prioritize the running vCPU,
+		 * preventing the SRI from moving it into the PREEMPTED state
+ * before the IPI is handled.
+ */
+ *ipi_sri_action = IPI_SRI_ACTION_DELAYED;
+ ret = false;
+ break;
+ case VCPU_STATE_WAITING:
+ if (*ipi_sri_action == IPI_SRI_ACTION_INIT ||
+ *ipi_sri_action == IPI_SRI_ACTION_NOT_DELAYED) {
+ /*
+ * The current target vCPU is either the first element
+			 * in the pending list or there is no running vCPU in
+			 * the list, so it is safe to trigger the SRI
+ * immediately.
+ */
+ ffa_notifications_sri_trigger_not_delayed(
+ target_vcpu->cpu);
+ } else if (*ipi_sri_action == IPI_SRI_ACTION_DELAYED) {
+ /*
+			 * Otherwise a running vCPU has a pending IPI, so set a
+			 * delayed SRI in order not to preempt the running vCPU
+			 * before it is able to handle its IPI.
+ */
+ ffa_notifications_sri_set_delayed(target_vcpu->cpu);
+ }
+ *ipi_sri_action = IPI_SRI_ACTION_NONE;
+ break;
+ case VCPU_STATE_BLOCKED:
+ case VCPU_STATE_PREEMPTED:
+ if (*ipi_sri_action == IPI_SRI_ACTION_INIT) {
+ /*
+			 * The current target vCPU is at the head of the list of
+			 * pending IPIs, so allow it to be handled by the default
+			 * secure interrupt handling. Change the state to
+			 * IPI_SRI_ACTION_NOT_DELAYED since there can no longer
+			 * be a running vCPU with a pending IPI (it would have
+			 * been the head of the list), so it is safe to trigger
+ * the SRI for any waiting vCPUs immediately.
+ */
+ *ipi_sri_action = IPI_SRI_ACTION_NOT_DELAYED;
+ ret = false;
+ } else {
+ /*
+ * Queue the pending virtual interrupt for
+			 * the target vCPU.
+ */
+ if (!vcpu_interrupt_queue_push(target_vcpu_locked,
+ HF_IPI_INTID)) {
+ panic("Exhausted interrupt queue for vcpu of "
+ "SP: %x interrupt: %u\n",
+ target_vcpu->vm->id, HF_IPI_INTID);
+ }
+ }
+ break;
+ default:
+ dlog_error(
+			"%s: unexpected state: %u handling an IPI for [%x %u]\n",
+ __func__, target_vcpu->state, target_vcpu->vm->id,
+ vcpu_index(target_vcpu));
+ }
+
+ return ret;
+}
+
+/**
+ * IPI IRQ specific handling for the secure interrupt.
*/
bool hf_ipi_handle(struct vcpu_locked target_vcpu_locked)
{
- struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
+ enum ipi_sri_action ipi_sri_action = IPI_SRI_ACTION_INIT;
+ bool ret = true;
- switch (target_vcpu->state) {
- case VCPU_STATE_WAITING:
- ffa_notifications_sri_trigger_not_delayed(target_vcpu->cpu);
- return true;
- case VCPU_STATE_RUNNING:
- case VCPU_STATE_BLOCKED:
- case VCPU_STATE_PREEMPTED:
- /*
- * Let the normal secure interrupt handling handle the
- * interrupt as usual.
- */
- return false;
- default:
- dlog_error("Unexpected state: %u handling an IPI for [%x %u]",
- target_vcpu->state, target_vcpu->vm->id,
- vcpu_index(target_vcpu));
- return true;
+ ret = hf_ipi_handle_list_element(target_vcpu_locked, &ipi_sri_action);
+
+ /*
+	 * Drain the pending IPI list, handling the IPI for the remaining
+ * target vCPUs.
+ */
+ for (struct vcpu *target_vcpu =
+ hf_ipi_get_pending_target_vcpu(target_vcpu_locked.vcpu);
+ target_vcpu != NULL;
+ target_vcpu = hf_ipi_get_pending_target_vcpu(target_vcpu)) {
+ target_vcpu_locked = vcpu_lock(target_vcpu);
+ hf_ipi_handle_list_element(target_vcpu_locked, &ipi_sri_action);
+ vcpu_unlock(&target_vcpu_locked);
}
+
+ return ret;
}
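For reference, a condensed summary of the ipi_sri_action transitions
implemented by hf_ipi_handle_list_element() above (action on entry + state
of the vCPU being handled -> action afterwards):

    /*
     *   INIT        + RUNNING           -> DELAYED      (returns false)
     *   INIT        + BLOCKED/PREEMPTED -> NOT_DELAYED  (returns false)
     *   INIT        + WAITING           -> NONE         (SRI triggered now)
     *   NOT_DELAYED + WAITING           -> NONE         (SRI triggered now)
     *   DELAYED     + WAITING           -> NONE         (SRI set delayed)
     *   NONE        + WAITING           -> NONE         (no SRI action)
     *   non-INIT    + BLOCKED/PREEMPTED -> unchanged    (virtual IPI queued)
     * The virtual IPI is injected into every vCPU taken from the list;
     * RUNNING with a non-INIT action is a panic, as the running vCPU must
     * be the first element handled.
     */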
diff --git a/src/vcpu.c b/src/vcpu.c
index f21cb17..6ff71a3 100644
--- a/src/vcpu.c
+++ b/src/vcpu.c
@@ -71,6 +71,7 @@
vcpu->rt_model = RTM_SP_INIT;
list_init(&vcpu->boot_list_node);
list_init(&vcpu->timer_node);
+ list_init(&vcpu->ipi_list_node);
}
/**