Provide support for FIQ interrupts

This patch introduces support for FIQ virtual interrupts by adding
an "interrupt type" configuration for each INTID.

The following changes are made:
  - Helpers added to inject a virtual FIQ by setting HCR_EL2.VF
  - hf_interrupt_enable hypervisor call updated with a new
    "interrupt type" argument (see the sketch below)

Nominally, we intend to signal an NS interrupt to an SP as a virtual FIQ
using GICv3. The same interrupt can be signaled as a virtual IRQ if that
option is passed in the Hafnium interrupt enable call.
The reasoning is to ease migration of TEEs that rely on receiving
physical Group1 NS/Group0 interrupts as FIQs (or foreign interrupts).
The same TEE, now running as a Secure Partition, receives a virtual FIQ
for an NS interrupt (or a Group0 interrupt).
As mentioned above, there is also the flexibility to have all interrupts
(Group1 S/Group1 NS/Group0) delivered to the same vIRQ vector, as
illustrated below.
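
For example (illustrative only, the INTID macros are placeholders), an
SP could keep one interrupt on the virtual IRQ vector while taking
another as a virtual FIQ:

  /* Deliver this INTID on the virtual IRQ vector. */
  hf_interrupt_enable(SEC_DEVICE_INTID, true, INTERRUPT_TYPE_IRQ);

  /* Deliver this INTID on the virtual FIQ vector. */
  hf_interrupt_enable(TIMER_INTID, true, INTERRUPT_TYPE_FIQ);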

Change-Id: I78daf1ae226ea9cc01f65da36ae31ed9fff84f42
Signed-off-by: Manish Pandey <manish.pandey2@arm.com>
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/src/api.c b/src/api.c
index a675784..eb0511b 100644
--- a/src/api.c
+++ b/src/api.c
@@ -473,32 +473,36 @@
 	struct vcpu_locked target_locked, uint32_t intid, struct vcpu *current,
 	struct vcpu **next)
 {
+	struct vcpu *target_vcpu = target_locked.vcpu;
 	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
-	uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
+	uint32_t intid_shift = intid % INTERRUPT_REGISTER_BITS;
+	uint32_t intid_mask = 1U << intid_shift;
 	int64_t ret = 0;
 
 	/*
-	 * We only need to change state and (maybe) trigger a virtual IRQ if it
-	 * is enabled and was not previously pending. Otherwise we can skip
-	 * everything except setting the pending bit.
-	 *
-	 * If you change this logic make sure to update the need_vm_lock logic
-	 * above to match.
+	 * We only need to change state and (maybe) trigger a virtual interrupt
+	 * if it is enabled and was not previously pending. Otherwise we can
+	 * skip everything except setting the pending bit.
 	 */
-	if (!(target_locked.vcpu->interrupts.interrupt_enabled[intid_index] &
-	      ~target_locked.vcpu->interrupts.interrupt_pending[intid_index] &
+	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
+	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
 	      intid_mask)) {
 		goto out;
 	}
 
 	/* Increment the count. */
-	target_locked.vcpu->interrupts.enabled_and_pending_count++;
+	if ((target_vcpu->interrupts.interrupt_type[intid_index] &
+	     intid_mask) == (INTERRUPT_TYPE_IRQ << intid_shift)) {
+		vcpu_irq_count_increment(target_locked);
+	} else {
+		vcpu_fiq_count_increment(target_locked);
+	}
 
 	/*
 	 * Only need to update state if there was not already an
 	 * interrupt enabled and pending.
 	 */
-	if (target_locked.vcpu->interrupts.enabled_and_pending_count != 1) {
+	if (vcpu_interrupt_count_get(target_locked) != 1) {
 		goto out;
 	}
 
@@ -508,14 +512,13 @@
 		 * should run or kick the target vCPU.
 		 */
 		ret = 1;
-	} else if (current != target_locked.vcpu && next != NULL) {
-		*next = api_wake_up(current, target_locked.vcpu);
+	} else if (current != target_vcpu && next != NULL) {
+		*next = api_wake_up(current, target_vcpu);
 	}
 
 out:
 	/* Either way, make it pending. */
-	target_locked.vcpu->interrupts.interrupt_pending[intid_index] |=
-		intid_mask;
+	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;
 
 	return ret;
 }
@@ -652,7 +655,7 @@
 		/* Fall through. */
 	case VCPU_STATE_BLOCKED_INTERRUPT:
 		/* Allow virtual interrupts to be delivered. */
-		if (vcpu->interrupts.enabled_and_pending_count > 0) {
+		if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
 			break;
 		}
 
@@ -1211,17 +1214,18 @@
  */
 bool api_ffa_msg_recv_block_interrupted(struct vcpu *current)
 {
+	struct vcpu_locked current_locked;
 	bool interrupted;
 
-	sl_lock(&current->lock);
+	current_locked = vcpu_lock(current);
 
 	/*
 	 * Don't block if there are enabled and pending interrupts, to match
 	 * behaviour of wait_for_interrupt.
 	 */
-	interrupted = (current->interrupts.enabled_and_pending_count > 0);
+	interrupted = (vcpu_interrupt_count_get(current_locked) > 0);
 
-	sl_unlock(&current->lock);
+	vcpu_unlock(&current_locked);
 
 	return interrupted;
 }
@@ -1428,16 +1432,19 @@
  *
  * Returns 0 on success, or -1 if the intid is invalid.
  */
-int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
+int64_t api_interrupt_enable(uint32_t intid, bool enable,
+			     enum interrupt_type type, struct vcpu *current)
 {
+	struct vcpu_locked current_locked;
 	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
-	uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
+	uint32_t intid_shift = intid % INTERRUPT_REGISTER_BITS;
+	uint32_t intid_mask = 1U << intid_shift;
 
 	if (intid >= HF_NUM_INTIDS) {
 		return -1;
 	}
 
-	sl_lock(&current->lock);
+	current_locked = vcpu_lock(current);
 	if (enable) {
 		/*
 		 * If it is pending and was not enabled before, increment the
@@ -1446,10 +1453,24 @@
 		if (current->interrupts.interrupt_pending[intid_index] &
 		    ~current->interrupts.interrupt_enabled[intid_index] &
 		    intid_mask) {
-			current->interrupts.enabled_and_pending_count++;
+			if ((current->interrupts.interrupt_type[intid_index] &
+			     intid_mask) ==
+			    (INTERRUPT_TYPE_IRQ << intid_shift)) {
+				vcpu_irq_count_increment(current_locked);
+			} else {
+				vcpu_fiq_count_increment(current_locked);
+			}
 		}
 		current->interrupts.interrupt_enabled[intid_index] |=
 			intid_mask;
+
+		if (type == INTERRUPT_TYPE_IRQ) {
+			current->interrupts.interrupt_type[intid_index] &=
+				~intid_mask;
+		} else if (type == INTERRUPT_TYPE_FIQ) {
+			current->interrupts.interrupt_type[intid_index] |=
+				intid_mask;
+		}
 	} else {
 		/*
 		 * If it is pending and was enabled before, decrement the count.
@@ -1457,13 +1478,20 @@
 		if (current->interrupts.interrupt_pending[intid_index] &
 		    current->interrupts.interrupt_enabled[intid_index] &
 		    intid_mask) {
-			current->interrupts.enabled_and_pending_count--;
+			if ((current->interrupts.interrupt_type[intid_index] &
+			     intid_mask) ==
+			    (INTERRUPT_TYPE_IRQ << intid_shift)) {
+				vcpu_irq_count_decrement(current_locked);
+			} else {
+				vcpu_fiq_count_decrement(current_locked);
+			}
 		}
 		current->interrupts.interrupt_enabled[intid_index] &=
 			~intid_mask;
+		current->interrupts.interrupt_type[intid_index] &= ~intid_mask;
 	}
 
-	sl_unlock(&current->lock);
+	vcpu_unlock(&current_locked);
 	return 0;
 }
 
@@ -1476,12 +1504,13 @@
 {
 	uint8_t i;
 	uint32_t first_interrupt = HF_INVALID_INTID;
+	struct vcpu_locked current_locked;
 
 	/*
 	 * Find the first enabled and pending interrupt ID, return it, and
 	 * deactivate it.
 	 */
-	sl_lock(&current->lock);
+	current_locked = vcpu_lock(current);
 	for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
 		uint32_t enabled_and_pending =
 			current->interrupts.interrupt_enabled[i] &
@@ -1489,19 +1518,27 @@
 
 		if (enabled_and_pending != 0) {
 			uint8_t bit_index = ctz(enabled_and_pending);
+			uint32_t intid_mask = 1U << bit_index;
+
 			/*
 			 * Mark it as no longer pending and decrement the count.
 			 */
-			current->interrupts.interrupt_pending[i] &=
-				~(1U << bit_index);
-			current->interrupts.enabled_and_pending_count--;
+			current->interrupts.interrupt_pending[i] &= ~intid_mask;
+
+			if ((current->interrupts.interrupt_type[i] &
+			     intid_mask) == (INTERRUPT_TYPE_IRQ << bit_index)) {
+				vcpu_irq_count_decrement(current_locked);
+			} else {
+				vcpu_fiq_count_decrement(current_locked);
+			}
+
 			first_interrupt =
 				i * INTERRUPT_REGISTER_BITS + bit_index;
 			break;
 		}
 	}
 
-	sl_unlock(&current->lock);
+	vcpu_unlock(&current_locked);
 	return first_interrupt;
 }
 
@@ -1561,9 +1598,11 @@
 
 	target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
 
-	dlog_info("Injecting IRQ %u for VM %#x vCPU %u from VM %#x vCPU %u\n",
-		  intid, target_vm_id, target_vcpu_idx, current->vm->id,
-		  vcpu_index(current));
+	dlog_verbose(
+		"Injecting interrupt %u for VM %#x vCPU %u from VM %#x vCPU "
+		"%u\n",
+		intid, target_vm_id, target_vcpu_idx, current->vm->id,
+		vcpu_index(current));
 	return internal_interrupt_inject(target_vcpu, intid, current, next);
 }
 
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index c37666f..32c58f2 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -235,7 +235,7 @@
  * Sets or clears the VI bit in the HCR_EL2 register saved in the given
  * arch_regs.
  */
-static void set_virtual_interrupt(struct arch_regs *r, bool enable)
+static void set_virtual_irq(struct arch_regs *r, bool enable)
 {
 	if (enable) {
 		r->lazy.hcr_el2 |= HCR_EL2_VI;
@@ -247,7 +247,7 @@
 /**
  * Sets or clears the VI bit in the HCR_EL2 register.
  */
-static void set_virtual_interrupt_current(bool enable)
+static void set_virtual_irq_current(bool enable)
 {
 	uintreg_t hcr_el2 = read_msr(hcr_el2);
 
@@ -259,6 +259,34 @@
 	write_msr(hcr_el2, hcr_el2);
 }
 
+/**
+ * Sets or clears the VF bit in the HCR_EL2 register saved in the given
+ * arch_regs.
+ */
+static void set_virtual_fiq(struct arch_regs *r, bool enable)
+{
+	if (enable) {
+		r->lazy.hcr_el2 |= HCR_EL2_VF;
+	} else {
+		r->lazy.hcr_el2 &= ~HCR_EL2_VF;
+	}
+}
+
+/**
+ * Sets or clears the VF bit in the HCR_EL2 register.
+ */
+static void set_virtual_fiq_current(bool enable)
+{
+	uintreg_t hcr_el2 = read_msr(hcr_el2);
+
+	if (enable) {
+		hcr_el2 |= HCR_EL2_VF;
+	} else {
+		hcr_el2 &= ~HCR_EL2_VF;
+	}
+	write_msr(hcr_el2, hcr_el2);
+}
+
 #if SECURE_WORLD == 1
 
 static bool sp_boot_next(struct vcpu *current, struct vcpu **next,
@@ -537,31 +565,36 @@
 }
 
 /**
- * Set or clear VI bit according to pending interrupts.
+ * Set or clear VI/VF bits according to pending interrupts.
  */
-static void update_vi(struct vcpu *next)
+static void vcpu_update_virtual_interrupts(struct vcpu *next)
 {
+	struct vcpu_locked vcpu_locked;
+
 	if (next == NULL) {
 		/*
 		 * Not switching vCPUs, set the bit for the current vCPU
 		 * directly in the register.
 		 */
-		struct vcpu *vcpu = current();
 
-		sl_lock(&vcpu->lock);
-		set_virtual_interrupt_current(
-			vcpu->interrupts.enabled_and_pending_count > 0);
-		sl_unlock(&vcpu->lock);
+		vcpu_locked = vcpu_lock(current());
+		set_virtual_irq_current(
+			vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
+		set_virtual_fiq_current(
+			vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
+		vcpu_unlock(&vcpu_locked);
 	} else if (vm_id_is_current_world(next->vm->id)) {
 		/*
 		 * About to switch vCPUs, set the bit for the vCPU to which we
 		 * are switching in the saved copy of the register.
 		 */
-		sl_lock(&next->lock);
-		set_virtual_interrupt(
-			&next->regs,
-			next->interrupts.enabled_and_pending_count > 0);
-		sl_unlock(&next->lock);
+
+		vcpu_locked = vcpu_lock(next);
+		set_virtual_irq(&next->regs,
+				vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
+		set_virtual_fiq(&next->regs,
+				vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
+		vcpu_unlock(&vcpu_locked);
 	}
 }
 
@@ -584,7 +617,7 @@
 
 	if (ffa_handler(&args, vcpu, next)) {
 		arch_regs_set_retval(&vcpu->regs, args);
-		update_vi(*next);
+		vcpu_update_virtual_interrupts(*next);
 		return true;
 	}
 
@@ -807,8 +840,8 @@
 		break;
 
 	case HF_INTERRUPT_ENABLE:
-		vcpu->regs.r[0] =
-			api_interrupt_enable(args.arg1, args.arg2, vcpu);
+		vcpu->regs.r[0] = api_interrupt_enable(args.arg1, args.arg2,
+						       args.arg3, vcpu);
 		break;
 
 	case HF_INTERRUPT_GET:
@@ -828,7 +861,7 @@
 		vcpu->regs.r[0] = SMCCC_ERROR_UNKNOWN;
 	}
 
-	update_vi(next);
+	vcpu_update_virtual_interrupts(next);
 
 	return next;
 }
diff --git a/src/arch/aarch64/sysregs.h b/src/arch/aarch64/sysregs.h
index add7fbb..3b95e68 100644
--- a/src/arch/aarch64/sysregs.h
+++ b/src/arch/aarch64/sysregs.h
@@ -416,6 +416,12 @@
 #define HCR_EL2_VI (UINT64_C(0x1) << 7)
 
 /**
+ * Virtual FIQ Interrupt.
+ * When set indicates that there is a virtual FIQ pending.
+ */
+#define HCR_EL2_VF (UINT64_C(0x1) << 6)
+
+/**
  * Physical SError Routing.
  * When set, physical SError interrupts are taken to EL2, unless routed to EL3.
  */