Provide support for FIQ interrupts

This patch introduces support for virtual FIQ interrupts by adding
an "interrupt type" configuration for each INTID.

The following changes are made:
  - Helpers added to inject a virtual FIQ by setting HCR_EL2.VF
  - hf_interrupt_enable hypervisor call updated with a new
    "interrupt type" argument

By default, an NS interrupt is signaled to an SP as a virtual FIQ when
using GICv3. The same interrupt can instead be signaled as a virtual
IRQ if that option is passed to the Hafnium interrupt enable call.
The rationale is to ease migration of TEEs that rely on receiving
physical Group1 NS/Group0 interrupts as FIQs (also known as foreign
interrupts). The same TEE, now running as a Secure Partition, receives
a virtual FIQ for an NS interrupt (or a Group0 interrupt).
As mentioned above, there is also the flexibility to receive all
interrupt groups (Group1 S/Group1 NS/Group0) on the same vIRQ vector.

Change-Id: I78daf1ae226ea9cc01f65da36ae31ed9fff84f42
Signed-off-by: Manish Pandey <manish.pandey2@arm.com>
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/inc/hf/api.h b/inc/hf/api.h
index e72d3cd..6a636a4 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -27,7 +27,8 @@
 struct vcpu *api_abort(struct vcpu *current);
 struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu);
 
-int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current);
+int64_t api_interrupt_enable(uint32_t intid, bool enable,
+			     enum interrupt_type type, struct vcpu *current);
 uint32_t api_interrupt_get(struct vcpu *current);
 int64_t api_interrupt_inject(ffa_vm_id_t target_vm_id,
 			     ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h
index 3668c8d..7a2bdf3 100644
--- a/inc/hf/vcpu.h
+++ b/inc/hf/vcpu.h
@@ -41,12 +41,16 @@
 	uint32_t interrupt_enabled[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
 	/** Bitfield keeping track of which interrupts are pending. */
 	uint32_t interrupt_pending[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
+	/** Bitfield recording the interrupt pin configuration. */
+	uint32_t interrupt_type[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
 	/**
 	 * The number of interrupts which are currently both enabled and
-	 * pending. i.e. the number of bits set in interrupt_enable &
-	 * interrupt_pending.
+	 * pending. Virtual IRQ and FIQ interrupt types are counted
+	 * independently, i.e. the sum of the two counters is the number of
+	 * bits set in interrupt_enabled & interrupt_pending.
 	 */
-	uint32_t enabled_and_pending_count;
+	uint32_t enabled_and_pending_irq_count;
+	uint32_t enabled_and_pending_fiq_count;
 };
 
 struct vcpu_fault_info {
@@ -116,3 +120,41 @@
 			    struct vcpu_fault_info *f);
 
 void vcpu_reset(struct vcpu *vcpu);
+
+static inline void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
+{
+	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
+}
+
+static inline void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
+{
+	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
+}
+
+static inline void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
+{
+	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
+}
+
+static inline void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
+{
+	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
+}
+
+static inline uint32_t vcpu_interrupt_irq_count_get(
+	struct vcpu_locked vcpu_locked)
+{
+	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
+}
+
+static inline uint32_t vcpu_interrupt_fiq_count_get(
+	struct vcpu_locked vcpu_locked)
+{
+	return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
+}
+
+static inline uint32_t vcpu_interrupt_count_get(struct vcpu_locked vcpu_locked)
+{
+	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count +
+	       vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
+}
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index 900a74e..b8a07da 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -285,9 +285,10 @@
  *
  * Returns 0 on success, or -1 if the intid is invalid.
  */
-static inline int64_t hf_interrupt_enable(uint32_t intid, bool enable)
+static inline int64_t hf_interrupt_enable(uint32_t intid, bool enable,
+					  enum interrupt_type type)
 {
-	return hf_call(HF_INTERRUPT_ENABLE, intid, enable, 0);
+	return hf_call(HF_INTERRUPT_ENABLE, intid, enable, type);
 }
 
 /**
diff --git a/inc/vmapi/hf/types.h b/inc/vmapi/hf/types.h
index fa5128e..54bf2c7 100644
--- a/inc/vmapi/hf/types.h
+++ b/inc/vmapi/hf/types.h
@@ -51,3 +51,9 @@
 
 /** The virtual interrupt ID used for the virtual timer. */
 #define HF_VIRTUAL_TIMER_INTID 3
+
+/** Type of interrupts */
+enum interrupt_type {
+	INTERRUPT_TYPE_IRQ,
+	INTERRUPT_TYPE_FIQ,
+};
diff --git a/src/api.c b/src/api.c
index a675784..eb0511b 100644
--- a/src/api.c
+++ b/src/api.c
@@ -473,32 +473,36 @@
 	struct vcpu_locked target_locked, uint32_t intid, struct vcpu *current,
 	struct vcpu **next)
 {
+	struct vcpu *target_vcpu = target_locked.vcpu;
 	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
-	uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
+	uint32_t intid_shift = intid % INTERRUPT_REGISTER_BITS;
+	uint32_t intid_mask = 1U << intid_shift;
 	int64_t ret = 0;
 
 	/*
-	 * We only need to change state and (maybe) trigger a virtual IRQ if it
-	 * is enabled and was not previously pending. Otherwise we can skip
-	 * everything except setting the pending bit.
-	 *
-	 * If you change this logic make sure to update the need_vm_lock logic
-	 * above to match.
+	 * We only need to change state and (maybe) trigger a virtual interrupt
+	 * if it is enabled and was not previously pending. Otherwise we can
+	 * skip everything except setting the pending bit.
 	 */
-	if (!(target_locked.vcpu->interrupts.interrupt_enabled[intid_index] &
-	      ~target_locked.vcpu->interrupts.interrupt_pending[intid_index] &
+	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
+	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
 	      intid_mask)) {
 		goto out;
 	}
 
 	/* Increment the count. */
-	target_locked.vcpu->interrupts.enabled_and_pending_count++;
+	if ((target_vcpu->interrupts.interrupt_type[intid_index] &
+	     intid_mask) == (INTERRUPT_TYPE_IRQ << intid_shift)) {
+		vcpu_irq_count_increment(target_locked);
+	} else {
+		vcpu_fiq_count_increment(target_locked);
+	}
 
 	/*
 	 * Only need to update state if there was not already an
 	 * interrupt enabled and pending.
 	 */
-	if (target_locked.vcpu->interrupts.enabled_and_pending_count != 1) {
+	if (vcpu_interrupt_count_get(target_locked) != 1) {
 		goto out;
 	}
 
@@ -508,14 +512,13 @@
 		 * should run or kick the target vCPU.
 		 */
 		ret = 1;
-	} else if (current != target_locked.vcpu && next != NULL) {
-		*next = api_wake_up(current, target_locked.vcpu);
+	} else if (current != target_vcpu && next != NULL) {
+		*next = api_wake_up(current, target_vcpu);
 	}
 
 out:
 	/* Either way, make it pending. */
-	target_locked.vcpu->interrupts.interrupt_pending[intid_index] |=
-		intid_mask;
+	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;
 
 	return ret;
 }
@@ -652,7 +655,7 @@
 		/* Fall through. */
 	case VCPU_STATE_BLOCKED_INTERRUPT:
 		/* Allow virtual interrupts to be delivered. */
-		if (vcpu->interrupts.enabled_and_pending_count > 0) {
+		if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
 			break;
 		}
 
@@ -1211,17 +1214,18 @@
  */
 bool api_ffa_msg_recv_block_interrupted(struct vcpu *current)
 {
+	struct vcpu_locked current_locked;
 	bool interrupted;
 
-	sl_lock(&current->lock);
+	current_locked = vcpu_lock(current);
 
 	/*
 	 * Don't block if there are enabled and pending interrupts, to match
 	 * behaviour of wait_for_interrupt.
 	 */
-	interrupted = (current->interrupts.enabled_and_pending_count > 0);
+	interrupted = (vcpu_interrupt_count_get(current_locked) > 0);
 
-	sl_unlock(&current->lock);
+	vcpu_unlock(&current_locked);
 
 	return interrupted;
 }
@@ -1428,16 +1432,19 @@
  *
  * Returns 0 on success, or -1 if the intid is invalid.
  */
-int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
+int64_t api_interrupt_enable(uint32_t intid, bool enable,
+			     enum interrupt_type type, struct vcpu *current)
 {
+	struct vcpu_locked current_locked;
 	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
-	uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
+	uint32_t intid_shift = intid % INTERRUPT_REGISTER_BITS;
+	uint32_t intid_mask = 1U << intid_shift;
 
 	if (intid >= HF_NUM_INTIDS) {
 		return -1;
 	}
 
-	sl_lock(&current->lock);
+	current_locked = vcpu_lock(current);
 	if (enable) {
 		/*
 		 * If it is pending and was not enabled before, increment the
@@ -1446,10 +1453,24 @@
 		if (current->interrupts.interrupt_pending[intid_index] &
 		    ~current->interrupts.interrupt_enabled[intid_index] &
 		    intid_mask) {
-			current->interrupts.enabled_and_pending_count++;
+			if ((current->interrupts.interrupt_type[intid_index] &
+			     intid_mask) ==
+			    (INTERRUPT_TYPE_IRQ << intid_shift)) {
+				vcpu_irq_count_increment(current_locked);
+			} else {
+				vcpu_fiq_count_increment(current_locked);
+			}
 		}
 		current->interrupts.interrupt_enabled[intid_index] |=
 			intid_mask;
+
+		if (type == INTERRUPT_TYPE_IRQ) {
+			current->interrupts.interrupt_type[intid_index] &=
+				~intid_mask;
+		} else if (type == INTERRUPT_TYPE_FIQ) {
+			current->interrupts.interrupt_type[intid_index] |=
+				intid_mask;
+		}
 	} else {
 		/*
 		 * If it is pending and was enabled before, decrement the count.
@@ -1457,13 +1478,20 @@
 		if (current->interrupts.interrupt_pending[intid_index] &
 		    current->interrupts.interrupt_enabled[intid_index] &
 		    intid_mask) {
-			current->interrupts.enabled_and_pending_count--;
+			if ((current->interrupts.interrupt_type[intid_index] &
+			     intid_mask) ==
+			    (INTERRUPT_TYPE_IRQ << intid_shift)) {
+				vcpu_irq_count_decrement(current_locked);
+			} else {
+				vcpu_fiq_count_decrement(current_locked);
+			}
 		}
 		current->interrupts.interrupt_enabled[intid_index] &=
 			~intid_mask;
+		current->interrupts.interrupt_type[intid_index] &= ~intid_mask;
 	}
 
-	sl_unlock(&current->lock);
+	vcpu_unlock(&current_locked);
 	return 0;
 }
 
@@ -1476,12 +1504,13 @@
 {
 	uint8_t i;
 	uint32_t first_interrupt = HF_INVALID_INTID;
+	struct vcpu_locked current_locked;
 
 	/*
 	 * Find the first enabled and pending interrupt ID, return it, and
 	 * deactivate it.
 	 */
-	sl_lock(&current->lock);
+	current_locked = vcpu_lock(current);
 	for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
 		uint32_t enabled_and_pending =
 			current->interrupts.interrupt_enabled[i] &
@@ -1489,19 +1518,27 @@
 
 		if (enabled_and_pending != 0) {
 			uint8_t bit_index = ctz(enabled_and_pending);
+			uint32_t intid_mask = 1U << bit_index;
+
 			/*
 			 * Mark it as no longer pending and decrement the count.
 			 */
-			current->interrupts.interrupt_pending[i] &=
-				~(1U << bit_index);
-			current->interrupts.enabled_and_pending_count--;
+			current->interrupts.interrupt_pending[i] &= ~intid_mask;
+
+			if ((current->interrupts.interrupt_type[i] &
+			     intid_mask) == (INTERRUPT_TYPE_IRQ << bit_index)) {
+				vcpu_irq_count_decrement(current_locked);
+			} else {
+				vcpu_fiq_count_decrement(current_locked);
+			}
+
 			first_interrupt =
 				i * INTERRUPT_REGISTER_BITS + bit_index;
 			break;
 		}
 	}
 
-	sl_unlock(&current->lock);
+	vcpu_unlock(&current_locked);
 	return first_interrupt;
 }
 
@@ -1561,9 +1598,11 @@
 
 	target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
 
-	dlog_info("Injecting IRQ %u for VM %#x vCPU %u from VM %#x vCPU %u\n",
-		  intid, target_vm_id, target_vcpu_idx, current->vm->id,
-		  vcpu_index(current));
+	dlog_verbose(
+		"Injecting interrupt %u for VM %#x vCPU %u from VM %#x vCPU "
+		"%u\n",
+		intid, target_vm_id, target_vcpu_idx, current->vm->id,
+		vcpu_index(current));
 	return internal_interrupt_inject(target_vcpu, intid, current, next);
 }
 
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index c37666f..32c58f2 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -235,7 +235,7 @@
  * Sets or clears the VI bit in the HCR_EL2 register saved in the given
  * arch_regs.
  */
-static void set_virtual_interrupt(struct arch_regs *r, bool enable)
+static void set_virtual_irq(struct arch_regs *r, bool enable)
 {
 	if (enable) {
 		r->lazy.hcr_el2 |= HCR_EL2_VI;
@@ -247,7 +247,7 @@
 /**
  * Sets or clears the VI bit in the HCR_EL2 register.
  */
-static void set_virtual_interrupt_current(bool enable)
+static void set_virtual_irq_current(bool enable)
 {
 	uintreg_t hcr_el2 = read_msr(hcr_el2);
 
@@ -259,6 +259,34 @@
 	write_msr(hcr_el2, hcr_el2);
 }
 
+/**
+ * Sets or clears the VF bit in the HCR_EL2 register saved in the given
+ * arch_regs.
+ */
+static void set_virtual_fiq(struct arch_regs *r, bool enable)
+{
+	if (enable) {
+		r->lazy.hcr_el2 |= HCR_EL2_VF;
+	} else {
+		r->lazy.hcr_el2 &= ~HCR_EL2_VF;
+	}
+}
+
+/**
+ * Sets or clears the VF bit in the HCR_EL2 register.
+ */
+static void set_virtual_fiq_current(bool enable)
+{
+	uintreg_t hcr_el2 = read_msr(hcr_el2);
+
+	if (enable) {
+		hcr_el2 |= HCR_EL2_VF;
+	} else {
+		hcr_el2 &= ~HCR_EL2_VF;
+	}
+	write_msr(hcr_el2, hcr_el2);
+}
+
 #if SECURE_WORLD == 1
 
 static bool sp_boot_next(struct vcpu *current, struct vcpu **next,
@@ -537,31 +565,36 @@
 }
 
 /**
- * Set or clear VI bit according to pending interrupts.
+ * Set or clear VI/VF bits according to pending interrupts.
  */
-static void update_vi(struct vcpu *next)
+static void vcpu_update_virtual_interrupts(struct vcpu *next)
 {
+	struct vcpu_locked vcpu_locked;
+
 	if (next == NULL) {
 		/*
 		 * Not switching vCPUs, set the bit for the current vCPU
 		 * directly in the register.
 		 */
-		struct vcpu *vcpu = current();
 
-		sl_lock(&vcpu->lock);
-		set_virtual_interrupt_current(
-			vcpu->interrupts.enabled_and_pending_count > 0);
-		sl_unlock(&vcpu->lock);
+		vcpu_locked = vcpu_lock(current());
+		set_virtual_irq_current(
+			vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
+		set_virtual_fiq_current(
+			vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
+		vcpu_unlock(&vcpu_locked);
 	} else if (vm_id_is_current_world(next->vm->id)) {
 		/*
 		 * About to switch vCPUs, set the bit for the vCPU to which we
 		 * are switching in the saved copy of the register.
 		 */
-		sl_lock(&next->lock);
-		set_virtual_interrupt(
-			&next->regs,
-			next->interrupts.enabled_and_pending_count > 0);
-		sl_unlock(&next->lock);
+
+		vcpu_locked = vcpu_lock(next);
+		set_virtual_irq(&next->regs,
+				vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
+		set_virtual_fiq(&next->regs,
+				vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
+		vcpu_unlock(&vcpu_locked);
 	}
 }
 
@@ -584,7 +617,7 @@
 
 	if (ffa_handler(&args, vcpu, next)) {
 		arch_regs_set_retval(&vcpu->regs, args);
-		update_vi(*next);
+		vcpu_update_virtual_interrupts(*next);
 		return true;
 	}
 
@@ -807,8 +840,8 @@
 		break;
 
 	case HF_INTERRUPT_ENABLE:
-		vcpu->regs.r[0] =
-			api_interrupt_enable(args.arg1, args.arg2, vcpu);
+		vcpu->regs.r[0] = api_interrupt_enable(args.arg1, args.arg2,
+						       args.arg3, vcpu);
 		break;
 
 	case HF_INTERRUPT_GET:
@@ -828,7 +861,7 @@
 		vcpu->regs.r[0] = SMCCC_ERROR_UNKNOWN;
 	}
 
-	update_vi(next);
+	vcpu_update_virtual_interrupts(next);
 
 	return next;
 }
diff --git a/src/arch/aarch64/sysregs.h b/src/arch/aarch64/sysregs.h
index add7fbb..3b95e68 100644
--- a/src/arch/aarch64/sysregs.h
+++ b/src/arch/aarch64/sysregs.h
@@ -416,6 +416,12 @@
 #define HCR_EL2_VI (UINT64_C(0x1) << 7)
 
 /**
+ * Virtual FIQ Interrupt.
+ * When set indicates that there is a virtual FIQ pending.
+ */
+#define HCR_EL2_VF (UINT64_C(0x1) << 6)
+
+/**
  * Physical SError Routing.
  * When set, physical SError interrupts are taken to EL2, unless routed to EL3.
  */
diff --git a/test/vmapi/arch/aarch64/gicv3/services/timer.c b/test/vmapi/arch/aarch64/gicv3/services/timer.c
index c9c323b..31475b8 100644
--- a/test/vmapi/arch/aarch64/gicv3/services/timer.c
+++ b/test/vmapi/arch/aarch64/gicv3/services/timer.c
@@ -50,7 +50,7 @@
 TEST_SERVICE(timer)
 {
 	exception_setup(irq_current, NULL);
-	hf_interrupt_enable(HF_VIRTUAL_TIMER_INTID, true);
+	hf_interrupt_enable(HF_VIRTUAL_TIMER_INTID, true, INTERRUPT_TYPE_IRQ);
 	arch_irq_enable();
 
 	for (;;) {
@@ -130,7 +130,7 @@
 	struct ffa_value res;
 
 	exception_setup(irq_current, NULL);
-	hf_interrupt_enable(HF_VIRTUAL_TIMER_INTID, true);
+	hf_interrupt_enable(HF_VIRTUAL_TIMER_INTID, true, INTERRUPT_TYPE_IRQ);
 	arch_irq_enable();
 
 	res = ffa_msg_wait();
diff --git a/test/vmapi/primary_with_secondaries/services/echo_with_notification.c b/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
index d81bc82..4b9ad66 100644
--- a/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
+++ b/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
@@ -41,7 +41,8 @@
 TEST_SERVICE(echo_with_notification)
 {
 	exception_setup(irq, NULL);
-	hf_interrupt_enable(HF_MAILBOX_WRITABLE_INTID, true);
+	hf_interrupt_enable(HF_MAILBOX_WRITABLE_INTID, true,
+			    INTERRUPT_TYPE_IRQ);
 
 	/* Loop, echo messages back to the sender. */
 	for (;;) {
diff --git a/test/vmapi/primary_with_secondaries/services/interruptible.c b/test/vmapi/primary_with_secondaries/services/interruptible.c
index 1a85f64..13573ff 100644
--- a/test/vmapi/primary_with_secondaries/services/interruptible.c
+++ b/test/vmapi/primary_with_secondaries/services/interruptible.c
@@ -58,9 +58,9 @@
 	void *recv_buf = SERVICE_RECV_BUFFER();
 
 	exception_setup(irq, NULL);
-	hf_interrupt_enable(SELF_INTERRUPT_ID, true);
-	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true);
-	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_B, true);
+	hf_interrupt_enable(SELF_INTERRUPT_ID, true, INTERRUPT_TYPE_IRQ);
+	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true, INTERRUPT_TYPE_IRQ);
+	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_B, true, INTERRUPT_TYPE_IRQ);
 	arch_irq_enable();
 
 	for (;;) {
@@ -80,7 +80,8 @@
 			   memcmp(recv_buf, enable_message,
 				  sizeof(enable_message)) == 0) {
 			/* Enable interrupt ID C. */
-			hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_C, true);
+			hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_C, true,
+					    INTERRUPT_TYPE_IRQ);
 		} else {
 			dlog("Got unexpected message from VM %d, size %d.\n",
 			     ffa_msg_send_sender(ret), ffa_msg_send_size(ret));
diff --git a/test/vmapi/primary_with_secondaries/services/interruptible_echo.c b/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
index b13484f..9e5478a 100644
--- a/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
+++ b/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
@@ -31,7 +31,7 @@
 TEST_SERVICE(interruptible_echo)
 {
 	exception_setup(irq, NULL);
-	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true);
+	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true, INTERRUPT_TYPE_IRQ);
 	arch_irq_enable();
 
 	EXPECT_EQ(irq_counter, 0);
@@ -60,7 +60,7 @@
 	struct ffa_value res;
 
 	exception_setup(irq, NULL);
-	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true);
+	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true, INTERRUPT_TYPE_IRQ);
 	arch_irq_enable();
 
 	res = ffa_msg_wait();
@@ -93,7 +93,7 @@
 	struct ffa_value res;
 
 	exception_setup(irq, NULL);
-	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true);
+	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true, INTERRUPT_TYPE_IRQ);
 	arch_irq_enable();
 
 	EXPECT_EQ(irq_counter, 0);
diff --git a/test/vmapi/primary_with_secondaries/services/receive_block.c b/test/vmapi/primary_with_secondaries/services/receive_block.c
index d587a9c..edb4e3c 100644
--- a/test/vmapi/primary_with_secondaries/services/receive_block.c
+++ b/test/vmapi/primary_with_secondaries/services/receive_block.c
@@ -36,7 +36,7 @@
 
 	exception_setup(irq, NULL);
 	arch_irq_disable();
-	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true);
+	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true, INTERRUPT_TYPE_IRQ);
 
 	for (i = 0; i < 10; ++i) {
 		struct ffa_value res = ffa_msg_wait();
diff --git a/test/vmapi/primary_with_secondaries/services/wfi.c b/test/vmapi/primary_with_secondaries/services/wfi.c
index 09adb3d..92099f0 100644
--- a/test/vmapi/primary_with_secondaries/services/wfi.c
+++ b/test/vmapi/primary_with_secondaries/services/wfi.c
@@ -34,7 +34,7 @@
 
 	exception_setup(irq, NULL);
 	arch_irq_disable();
-	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true);
+	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true, INTERRUPT_TYPE_IRQ);
 
 	for (i = 0; i < 10; ++i) {
 		interrupt_wait();