feat(arch timer): handle host timer interrupt tracking live deadline
When the timer deadline set by the currently running vCPU expires, it
triggers the host (S-EL2 physical) timer interrupt. SPMC then injects
the timer virtual interrupt into the target vCPU, and either signals or
queues it just like any other secure virtual interrupt.

SPMC also disables the host timer and deactivates the corresponding PPI
interrupt.
Change-Id: Ibc06e5a17fae060f8efd754e3866b08d64a3c28c
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
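
A minimal, hedged sketch of the flow described above. Every sketch_*
name and constant below is invented for illustration; only the
disable/remap/EOI/inject ordering mirrors the hunks that follow:

/*
 * Sketch only: the S-EL2 physical timer INTID is never injected as-is;
 * it is remapped to the virtual timer INTID the vCPU expects.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_SEL2_TIMER_PHYS_INT 20U /* hypothetical host timer PPI */
#define SKETCH_VIRTUAL_TIMER_INTID 3U  /* hypothetical virtual INTID */

static uint32_t sketch_map_to_virtual(uint32_t phys_intid)
{
        if (phys_intid == SKETCH_SEL2_TIMER_PHYS_INT) {
                return SKETCH_VIRTUAL_TIMER_INTID;
        }
        return phys_intid;
}

int main(void)
{
        uint32_t intid = SKETCH_SEL2_TIMER_PHYS_INT;
        uint32_t v_intid;

        /* 1. Disable the host timer so it cannot re-fire immediately. */
        /* 2. Remap the physical INTID to its virtual counterpart. */
        v_intid = sketch_map_to_virtual(intid);
        /* 3. End of interrupt on the physical ID: drops the running
         *    priority and deactivates the PPI. */
        /* 4. Inject v_intid into the target vCPU; signal or queue it. */
        printf("physical INTID %u -> virtual INTID %u\n", intid, v_intid);
        return 0;
}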
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 978385c..c164064 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -8,6 +8,7 @@
#include "hf/arch/ffa.h"
#include "hf/arch/gicv3.h"
+#include "hf/arch/host_timer.h"
#include "hf/arch/mmu.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
@@ -25,6 +26,7 @@
#include "hf/interrupt_desc.h"
#include "hf/plat/interrupts.h"
#include "hf/std.h"
+#include "hf/timer_mgmt.h"
#include "hf/vcpu.h"
#include "hf/vm.h"
@@ -1516,6 +1518,11 @@
case HF_IPI_INTID:
target_vcpu = hf_ipi_get_pending_target_vcpu(current->cpu);
break;
+ case ARM_EL1_VIRT_TIMER_PHYS_INT:
+ /* Fall through */
+ case ARM_EL1_PHYS_TIMER_PHYS_INT:
+ panic("Timer interrupt not expected to fire: %u\n",
+ interrupt_id);
default:
target_vcpu = plat_ffa_find_target_vcpu_secure_interrupt(
current, interrupt_id);
@@ -1532,7 +1539,7 @@
* tracking the secure interrupt processing are set accordingly.
*/
static void plat_ffa_queue_vint(struct vcpu_locked target_vcpu_locked,
- uint32_t intid,
+ uint32_t vint_id,
struct vcpu_locked current_locked)
{
struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
@@ -1544,7 +1551,7 @@
}
/* Queue the pending virtual interrupt for target vcpu. */
- if (!vcpu_interrupt_queue_push(target_vcpu_locked, intid)) {
+ if (!vcpu_interrupt_queue_push(target_vcpu_locked, vint_id)) {
panic("Exhausted interrupt queue for vcpu of SP: %x\n",
target_vcpu->vm->id);
}
@@ -1556,7 +1563,7 @@
*/
static struct vcpu *plat_ffa_signal_secure_interrupt_sel0(
struct vcpu_locked current_locked,
- struct vcpu_locked target_vcpu_locked, uint32_t intid)
+ struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
struct vcpu *next;
@@ -1566,7 +1573,7 @@
case VCPU_STATE_WAITING:
if (target_vcpu->cpu == current_locked.vcpu->cpu) {
struct ffa_value ret_interrupt =
- api_ffa_interrupt_return(intid);
+ api_ffa_interrupt_return(v_intid);
/* FF-A v1.1 EAC0 Table 8.1 case 1 and Table 12.10. */
dlog_verbose("S-EL0: Secure interrupt signaled: %x\n",
@@ -1581,7 +1588,7 @@
* If the execution was in NWd as well, set the vCPU
* in preempted state as well.
*/
- plat_ffa_queue_vint(target_vcpu_locked, intid,
+ plat_ffa_queue_vint(target_vcpu_locked, v_intid,
current_locked);
/* Switch to target vCPU responsible for this interrupt.
@@ -1596,7 +1603,7 @@
* resumes current vCPU.
*/
next = NULL;
- plat_ffa_queue_vint(target_vcpu_locked, intid,
+ plat_ffa_queue_vint(target_vcpu_locked, v_intid,
(struct vcpu_locked){.vcpu = NULL});
}
break;
@@ -1610,7 +1617,7 @@
* vCPU.
*/
next = NULL;
- plat_ffa_queue_vint(target_vcpu_locked, intid,
+ plat_ffa_queue_vint(target_vcpu_locked, v_intid,
(struct vcpu_locked){.vcpu = NULL});
break;
default:
@@ -1627,7 +1634,7 @@
*/
static struct vcpu *plat_ffa_signal_secure_interrupt_sel1(
struct vcpu_locked current_locked,
- struct vcpu_locked target_vcpu_locked, uint32_t intid)
+ struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
struct vcpu *current = current_locked.vcpu;
@@ -1638,7 +1645,7 @@
case VCPU_STATE_WAITING:
if (target_vcpu->cpu == current_locked.vcpu->cpu) {
struct ffa_value ret_interrupt =
- api_ffa_interrupt_return(intid);
+ api_ffa_interrupt_return(v_intid);
/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
@@ -1653,7 +1660,7 @@
*/
vcpu_set_running(target_vcpu_locked, &ret_interrupt);
- plat_ffa_queue_vint(target_vcpu_locked, intid,
+ plat_ffa_queue_vint(target_vcpu_locked, v_intid,
current_locked);
next = target_vcpu;
} else {
@@ -1666,7 +1673,7 @@
dlog_verbose("S-EL1: Secure interrupt queued: %x\n",
target_vcpu->vm->id);
next = NULL;
- plat_ffa_queue_vint(target_vcpu_locked, intid,
+ plat_ffa_queue_vint(target_vcpu_locked, v_intid,
(struct vcpu_locked){.vcpu = NULL});
}
break;
@@ -1679,7 +1686,7 @@
*/
assert(target_vcpu->vm->vcpu_count == 1);
next = NULL;
- plat_ffa_queue_vint(target_vcpu_locked, intid,
+ plat_ffa_queue_vint(target_vcpu_locked, v_intid,
(struct vcpu_locked){.vcpu = NULL});
} else if (current->vm->id == HF_OTHER_WORLD_ID) {
/*
@@ -1688,7 +1695,7 @@
* the current vCPU.
*/
next = NULL;
- plat_ffa_queue_vint(target_vcpu_locked, intid,
+ plat_ffa_queue_vint(target_vcpu_locked, v_intid,
(struct vcpu_locked){.vcpu = NULL});
} else {
struct ffa_value ret_interrupt =
@@ -1720,7 +1727,7 @@
*/
vcpu_set_running(target_vcpu_locked, &ret_interrupt);
- plat_ffa_queue_vint(target_vcpu_locked, intid,
+ plat_ffa_queue_vint(target_vcpu_locked, v_intid,
current_locked);
next = target_vcpu;
@@ -1760,7 +1767,7 @@
}
next = NULL;
- plat_ffa_queue_vint(target_vcpu_locked, intid,
+ plat_ffa_queue_vint(target_vcpu_locked, v_intid,
(struct vcpu_locked){.vcpu = NULL});
break;
@@ -1785,7 +1792,7 @@
assert(target_vcpu->vm->vcpu_count == 1);
}
next = NULL;
- plat_ffa_queue_vint(target_vcpu_locked, intid,
+ plat_ffa_queue_vint(target_vcpu_locked, v_intid,
(struct vcpu_locked){.vcpu = NULL});
break;
case VCPU_STATE_BLOCKED_INTERRUPT:
@@ -1817,23 +1824,32 @@
struct vcpu_locked current_locked;
uint32_t intid;
struct vm_locked target_vm_locked;
+ uint32_t v_intid;
/* Find pending interrupt id. This also activates the interrupt. */
intid = plat_interrupts_get_pending_interrupt_id();
+ v_intid = intid;
- /*
- * Spurious interrupt ID indicating that there are no pending
- * interrupts to acknowledge. For such scenarios, resume the current
- * vCPU.
- */
- if (intid == SPURIOUS_INTID_OTHER_WORLD) {
+ switch (intid) {
+ case ARM_SEL2_TIMER_PHYS_INT:
+ /* Disable the S-EL2 physical timer. */
+ host_timer_disable();
+ target_vcpu = timer_find_target_vcpu(current);
+ v_intid = HF_VIRTUAL_TIMER_INTID;
+ break;
+ case SPURIOUS_INTID_OTHER_WORLD:
+ /*
+ * Spurious interrupt ID indicating that there are no pending
+ * interrupts to acknowledge. For such scenarios, resume the
+ * current vCPU.
+ */
*next = NULL;
return;
+ default:
+ target_vcpu = plat_ffa_find_target_vcpu(current, intid);
+ break;
}
- target_vcpu = plat_ffa_find_target_vcpu(current, intid);
- target_vm_locked = vm_lock(target_vcpu->vm);
-
/*
* End the interrupt to drop the running priority. It also deactivates
* the physical interrupt. If not, the interrupt could trigger again
@@ -1841,6 +1857,8 @@
*/
plat_interrupts_end_of_interrupt(intid);
+ target_vm_locked = vm_lock(target_vcpu->vm);
+
if (target_vcpu == current) {
current_locked = vcpu_lock(current);
target_vcpu_locked = current_locked;
@@ -1878,7 +1896,7 @@
assert(!target_vcpu->requires_deactivate_call);
/* Set the interrupt pending in the target vCPU. */
- vcpu_interrupt_inject(target_vcpu_locked, intid);
+ vcpu_interrupt_inject(target_vcpu_locked, v_intid);
switch (intid) {
case HF_IPI_INTID:
@@ -1898,10 +1916,10 @@
*next = target_vcpu_locked.vcpu->vm->el0_partition
? plat_ffa_signal_secure_interrupt_sel0(
current_locked,
- target_vcpu_locked, intid)
+ target_vcpu_locked, v_intid)
: plat_ffa_signal_secure_interrupt_sel1(
current_locked,
- target_vcpu_locked, intid);
+ target_vcpu_locked, v_intid);
}
}
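
Before the timer list changes, a hedged sketch of the signal-vs-queue
decision the hunks above exercise. The sketch_* types and the queue
depth are invented; the real code panics when the queue is exhausted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum sketch_vcpu_state {
        SKETCH_STATE_WAITING,
        SKETCH_STATE_RUNNING,
        SKETCH_STATE_PREEMPTED,
};

struct sketch_vcpu {
        enum sketch_vcpu_state state;
        uint32_t queue[8]; /* invented depth */
        uint32_t queued;
};

/* The virtual interrupt is queued in every branch; returning false here
 * stands in for the panic in plat_ffa_queue_vint. */
static bool sketch_queue_vint(struct sketch_vcpu *vcpu, uint32_t vint_id)
{
        if (vcpu->queued == 8) {
                return false;
        }
        vcpu->queue[vcpu->queued++] = vint_id;
        return true;
}

/* Returns true when the caller should switch to the target vCPU, i.e.
 * only when the target is WAITING on the same physical CPU; in every
 * other state the interrupt stays queued until the vCPU next runs. */
static bool sketch_deliver(struct sketch_vcpu *target, uint32_t vint_id,
                           bool same_cpu)
{
        bool signal_now =
                (target->state == SKETCH_STATE_WAITING) && same_cpu;

        if (!sketch_queue_vint(target, vint_id)) {
                printf("queue exhausted\n");
        }
        return signal_now;
}

int main(void)
{
        struct sketch_vcpu vcpu = { .state = SKETCH_STATE_WAITING };

        if (sketch_deliver(&vcpu, 3U, true)) {
                printf("switch to target vCPU via FFA_INTERRUPT\n");
        }
        return 0;
}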
diff --git a/src/timer_mgmt.c b/src/timer_mgmt.c
index 604ccb1..43d1c30 100644
--- a/src/timer_mgmt.c
+++ b/src/timer_mgmt.c
@@ -61,3 +61,78 @@
timer_list_remove_vcpu(vcpu->cpu, vcpu);
}
}
+
+/**
+ * A vCPU's timer entry is the last entry in the list if its `next` field
+ * points to the `root_entry` of the list.
+ */
+static inline bool timer_is_list_end(struct vcpu *vcpu,
+ struct timer_pending_vcpu_list *timer_list)
+{
+ return (vcpu->timer_node.next == &timer_list->root_entry);
+}
+
+/**
+ * Find the vCPU, among those tracked by the partition manager on the current
+ * CPU, whose timer deadline is nearest.
+ */
+struct vcpu *timer_find_vcpu_nearest_deadline(struct cpu *cpu)
+{
+ struct vcpu *vcpu_with_deadline = NULL;
+ struct vcpu *it_vcpu = NULL;
+ struct timer_pending_vcpu_list *timer_list;
+ uint64_t near_deadline = UINT64_MAX;
+ struct list_entry *next_timer_entry;
+
+ assert(cpu != NULL);
+
+ timer_list = &cpu->pending_timer_vcpus_list;
+ sl_lock(&cpu->lock);
+
+ if (list_empty(&timer_list->root_entry)) {
+ goto out;
+ }
+
+ next_timer_entry = timer_list->root_entry.next;
+
+ /* Iterate to find the vCPU with nearest deadline. */
+ do {
+ uint64_t expiry_ns;
+
+ /* vCPU iterator. */
+ it_vcpu =
+ CONTAINER_OF(next_timer_entry, struct vcpu, timer_node);
+ assert(arch_timer_enabled(&it_vcpu->regs));
+
+ expiry_ns = arch_timer_remaining_ns(&it_vcpu->regs);
+
+ if (expiry_ns < near_deadline) {
+ near_deadline = expiry_ns;
+ vcpu_with_deadline = it_vcpu;
+ }
+
+ /* Look at the next entry in the list. */
+ next_timer_entry = it_vcpu->timer_node.next;
+ } while (!timer_is_list_end(it_vcpu, timer_list));
+
+out:
+ sl_unlock(&cpu->lock);
+ return vcpu_with_deadline;
+}
+
+/**
+ * Find the vCPU whose timer deadline has expired and which therefore needs
+ * to be resumed at the earliest opportunity.
+ */
+struct vcpu *timer_find_target_vcpu(struct vcpu *current)
+{
+ struct vcpu *target_vcpu;
+
+ if (current->vm->id == HF_OTHER_WORLD_ID) {
+ target_vcpu = timer_find_vcpu_nearest_deadline(current->cpu);
+ } else {
+ target_vcpu = current;
+ }
+
+ return target_vcpu;
+}
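
For completeness, a self-contained sketch of the intrusive-list scan in
timer_find_vcpu_nearest_deadline above. The sketch_* names are invented;
the real code walks the list under the CPU lock and reads the remaining
time through arch_timer_remaining_ns():

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_list_entry {
        struct sketch_list_entry *next;
        struct sketch_list_entry *prev;
};

/* Recover the enclosing structure from a pointer to its list node. */
#define SKETCH_CONTAINER_OF(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sketch_vcpu {
        uint64_t remaining_ns; /* stands in for arch_timer_remaining_ns() */
        struct sketch_list_entry timer_node;
};

/* Insert an entry right after the sentinel root. */
static void sketch_list_push(struct sketch_list_entry *root,
                             struct sketch_list_entry *entry)
{
        entry->next = root->next;
        entry->prev = root;
        root->next->prev = entry;
        root->next = entry;
}

/* Walk the circular list and return the vCPU with the nearest deadline,
 * or NULL when the list is empty. */
static struct sketch_vcpu *sketch_nearest(struct sketch_list_entry *root)
{
        struct sketch_vcpu *nearest = NULL;
        uint64_t best = UINT64_MAX;
        struct sketch_list_entry *entry;

        for (entry = root->next; entry != root; entry = entry->next) {
                struct sketch_vcpu *vcpu = SKETCH_CONTAINER_OF(
                        entry, struct sketch_vcpu, timer_node);

                if (vcpu->remaining_ns < best) {
                        best = vcpu->remaining_ns;
                        nearest = vcpu;
                }
        }
        return nearest;
}

int main(void)
{
        struct sketch_list_entry root = { &root, &root };
        struct sketch_vcpu a = { .remaining_ns = 500 };
        struct sketch_vcpu b = { .remaining_ns = 100 };

        sketch_list_push(&root, &a.timer_node);
        sketch_list_push(&root, &b.timer_node);

        assert(sketch_nearest(&root) == &b);
        printf("nearest deadline: %llu ns\n",
               (unsigned long long)b.remaining_ns);
        return 0;
}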