Non-secure interrupt handling with managed exit

This patch implements handling of a non-secure interrupt occurring
while a Secure Partition executes at S-EL1.

A non-secure interrupt traps as an FIQ at S-EL2.
If the SP supports managed exit:
 1. The SPMC lowers the current core's priority mask register to
    prevent other interrupts from triggering while the managed exit
    operation is ongoing.
 2. The SPMC injects a "managed exit" virtual interrupt into the
    current SP and resumes it.
 3. The SP traps to its interrupt handler (IRQ or FIQ, depending on
    the interrupt enable call), performs its housekeeping, and returns
    with FFA_MSG_SEND_DIRECT_RESP (see the sketch after this list).
 4. On receiving FFA_MSG_SEND_DIRECT_RESP, the SPMC restores the
    current core's priority mask register and forwards the direct
    message response to the "other world".
 5. The SPMD transfers control back to the non-secure world, which
    handles the non-secure interrupt. The SP driver can schedule the
    SP again with another direct message request.
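
As an illustration of step 3 from the SP's point of view, here is a
minimal sketch of the handler side. sp_irq_handler, sp_read_pending_interrupt
and sp_quiesce are hypothetical names; the response uses the
ffa_msg_send_direct_resp wrapper from Hafnium's vmapi:

  #include "vmapi/hf/call.h"

  /*
   * SP interrupt handler sketch: on the managed exit virtual
   * interrupt, finish in-flight work and yield with a direct
   * message response routed back through the SPMC.
   */
  void sp_irq_handler(ffa_vm_id_t own_id)
  {
      uint32_t intid = sp_read_pending_interrupt(); /* hypothetical */

      if (intid == HF_MANAGED_EXIT_INTID) {
          sp_quiesce(); /* hypothetical housekeeping */

          /* Relinquish the CPU: respond to end the managed exit. */
          ffa_msg_send_direct_resp(own_id, HF_PRIMARY_VM_ID, 0, 0, 0);
      }
  }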

If the SP does not support managed exit:
  1. The SP is pre-empted and its live context is saved.
  2. The SPMC switches to the "other world" with FFA_INTERRUPT.
  3. The SPMD forwards FFA_INTERRUPT to the normal world.
  4. The normal world handles the non-secure interrupt.
  5. The SP driver can later schedule the SP again by ffa_run, as
     sketched below.
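
A minimal sketch of the driver loop on this path, assuming the
ffa_msg_send_direct_req and ffa_run wrappers from Hafnium's vmapi and
hypothetical own_id/sp_id/vcpu_idx parameters:

  #include "vmapi/hf/call.h"

  /*
   * Driver loop sketch: kick the SP with a direct request; if it is
   * pre-empted by a non-secure interrupt, resume it with ffa_run.
   */
  void sp_driver_run(ffa_vm_id_t own_id, ffa_vm_id_t sp_id,
                     ffa_vcpu_index_t vcpu_idx)
  {
      struct ffa_value ret =
          ffa_msg_send_direct_req(own_id, sp_id, 0, 0, 0);

      while (ret.func == FFA_INTERRUPT_32) {
          /* The non-secure interrupt has been handled in this world. */
          ret = ffa_run(sp_id, vcpu_idx);
      }
  }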

Change-Id: I7858e5ad49225d2c26e6d4c31f8f4d007d6a7d07
Signed-off-by: Manish Pandey <manish.pandey2@arm.com>
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/src/api.c b/src/api.c
index eb0511b..70df9f7 100644
--- a/src/api.c
+++ b/src/api.c
@@ -21,6 +21,7 @@
 #include "hf/ffa_memory.h"
 #include "hf/mm.h"
 #include "hf/plat/console.h"
+#include "hf/plat/interrupts.h"
 #include "hf/spinlock.h"
 #include "hf/static_assert.h"
 #include "hf/std.h"
@@ -174,9 +175,9 @@
  * Called in context of a direct message response from a secure
  * partition to a VM.
  */
-static struct vcpu *api_switch_to_other_world(struct vcpu *current,
-					      struct ffa_value other_world_ret,
-					      enum vcpu_state vcpu_state)
+struct vcpu *api_switch_to_other_world(struct vcpu *current,
+				       struct ffa_value other_world_ret,
+				       enum vcpu_state vcpu_state)
 {
 	return api_switch_to_vm(current, other_world_ret, vcpu_state,
 				HF_OTHER_WORLD_ID);
@@ -221,6 +222,16 @@
 }
 
 /**
+ * Returns true if the VM owning the given vCPU supports managed exit and
+ * the vCPU is currently processing a managed exit.
+ */
+static bool api_ffa_is_managed_exit_ongoing(struct vcpu_locked vcpu_locked)
+{
+	return (vcpu_locked.vcpu->vm->supports_managed_exit &&
+		vcpu_locked.vcpu->processing_managed_exit);
+}
+
+/**
  * Returns to the primary VM and signals that the vCPU still has work to do so.
  */
 struct vcpu *api_preempt(struct vcpu *current)
@@ -469,9 +480,9 @@
  *  - 1 if it was called by the primary VM and the primary VM now needs to wake
  *    up or kick the target vCPU.
  */
-static int64_t internal_interrupt_inject_locked(
-	struct vcpu_locked target_locked, uint32_t intid, struct vcpu *current,
-	struct vcpu **next)
+int64_t api_interrupt_inject_locked(struct vcpu_locked target_locked,
+				    uint32_t intid, struct vcpu *current,
+				    struct vcpu **next)
 {
 	struct vcpu *target_vcpu = target_locked.vcpu;
 	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
@@ -532,8 +543,7 @@
 	struct vcpu_locked target_locked;
 
 	target_locked = vcpu_lock(target_vcpu);
-	ret = internal_interrupt_inject_locked(target_locked, intid, current,
-					       next);
+	ret = api_interrupt_inject_locked(target_locked, intid, current, next);
 	vcpu_unlock(&target_locked);
 
 	return ret;
@@ -1824,9 +1834,9 @@
 
 	/* Inject timer interrupt if any pending */
 	if (arch_timer_pending(&receiver_vcpu->regs)) {
-		internal_interrupt_inject_locked(vcpus_locked.vcpu1,
-						 HF_VIRTUAL_TIMER_INTID,
-						 current, NULL);
+		api_interrupt_inject_locked(vcpus_locked.vcpu1,
+					    HF_VIRTUAL_TIMER_INTID, current,
+					    NULL);
 
 		arch_timer_mask(&receiver_vcpu->regs);
 	}
@@ -1879,23 +1889,37 @@
 	}
 
 	current_locked = vcpu_lock(current);
-
-	/*
-	 * Ensure the terminating FFA_MSG_SEND_DIRECT_REQ had a
-	 * defined originator.
-	 */
-	if (!is_ffa_direct_msg_request_ongoing(current_locked)) {
+	if (api_ffa_is_managed_exit_ongoing(current_locked)) {
 		/*
-		 * Sending direct response but direct request origin vCPU is
-		 * not set.
+		 * No need for REQ/RESP state management as a managed exit does
+		 * not have a corresponding REQ pair.
 		 */
-		vcpu_unlock(&current_locked);
-		return ffa_error(FFA_DENIED);
-	}
+		if (receiver_vm_id != HF_PRIMARY_VM_ID) {
+			vcpu_unlock(&current_locked);
+			return ffa_error(FFA_DENIED);
+		}
 
-	if (current->direct_request_origin_vm_id != receiver_vm_id) {
-		vcpu_unlock(&current_locked);
-		return ffa_error(FFA_DENIED);
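+		/* Managed exit completed: unmask all interrupts again. */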
+		plat_interrupts_set_priority_mask(0xff);
+		current->processing_managed_exit = false;
+	} else {
+		/*
+		 * Ensure the terminating FFA_MSG_SEND_DIRECT_REQ had a
+		 * defined originator.
+		 */
+		if (!is_ffa_direct_msg_request_ongoing(current_locked)) {
+			/*
+			 * Sending direct response but direct request origin
+			 * vCPU is not set.
+			 */
+			vcpu_unlock(&current_locked);
+			return ffa_error(FFA_DENIED);
+		}
+
+		if (current->direct_request_origin_vm_id != receiver_vm_id) {
+			vcpu_unlock(&current_locked);
+			return ffa_error(FFA_DENIED);
+		}
 	}
 
 	/* Clear direct request origin for the caller. */
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 32c58f2..cd98722 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -20,6 +20,7 @@
 #include "hf/ffa.h"
 #include "hf/ffa_internal.h"
 #include "hf/panic.h"
+#include "hf/plat/interrupts.h"
 #include "hf/vm.h"
 
 #include "vmapi/hf/call.h"
@@ -882,6 +883,47 @@
 
 struct vcpu *fiq_lower(void)
 {
+#if SECURE_WORLD == 1
+	struct vcpu_locked current_locked;
+	struct vcpu *current_vcpu = current();
+	int ret;
+
+	if (current_vcpu->vm->supports_managed_exit) {
+		/* Mask all interrupts */
+		plat_interrupts_set_priority_mask(0x0);
+
+		current_locked = vcpu_lock(current_vcpu);
+		ret = api_interrupt_inject_locked(current_locked,
+						  HF_MANAGED_EXIT_INTID,
+						  current_vcpu, NULL);
+		if (ret != 0) {
+			panic("Failed to inject managed exit interrupt\n");
+		}
+
+		/* Entering managed exit sequence. */
+		current_vcpu->processing_managed_exit = true;
+
+		vcpu_unlock(&current_locked);
+
+		/*
+		 * Since we are in interrupt context, set the bit for the
+		 * current vCPU directly in the register.
+		 */
+		vcpu_update_virtual_interrupts(NULL);
+
+		/* Resume current vCPU. */
+		return NULL;
+	}
+
+	/*
+	 * The SP does not support managed exit. It is pre-empted and its
+	 * execution is handed back to the normal world through the
+	 * FFA_INTERRUPT ABI. The SP can be resumed later by ffa_run. The
+	 * calls to irq_lower and api_preempt are equivalent to calling
+	 * api_switch_to_other_world for the current vCPU with FFA_INTERRUPT_32.
+	 */
+#endif
+
 	return irq_lower();
 }