fix(interrupts): save and restore priority mask

To simplify secure interrupt handling, the threshold for interrupts
was changed by modifying the priority mask without first saving its
current value. This caused the original priority mask to be
overridden. This patch saves and restores the priority mask at the
appropriate points during secure interrupt handling.

Please note the current design makes the assumption that the target vCPU
of a secure interrupt is pinned to the same physical CPU on which the
secure interrupt triggered. The target vCPU has to be resumed on the current
CPU in order for it to service the virtual interrupt. This design limitation
simplifies the interrupt management implementation in SPMC.

Change-Id: I8571cb81595080a0555346e075591234a78e510d
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h
index 9206632..85cb657 100644
--- a/inc/hf/vcpu.h
+++ b/inc/hf/vcpu.h
@@ -120,6 +120,12 @@
 	 * triggered.
 	 */
 	struct vcpu *preempted_vcpu;
+
+	/**
+	 * Current value of the Priority Mask register which is saved/restored
+	 * during secure interrupt handling.
+	 */
+	uint8_t priority_mask;
 };
 
 /** Encapsulates a vCPU whose lock is held. */
diff --git a/src/api.c b/src/api.c
index d735b9e..ee99eee 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2264,7 +2264,7 @@
 		 */
 		CHECK(!current->processing_secure_interrupt);
 
-		plat_interrupts_set_priority_mask(0xff);
+		plat_interrupts_set_priority_mask(current->priority_mask);
 		current->processing_managed_exit = false;
 	} else {
 		/*
@@ -2297,8 +2297,9 @@
 			/* There is no preempted vCPU to resume. */
 			CHECK(current->preempted_vcpu == NULL);
 
-			/* Unmask interrupts. */
-			plat_interrupts_set_priority_mask(0xff);
+			/* Restore interrupt priority mask. */
+			plat_interrupts_set_priority_mask(
+				current->priority_mask);
 
 			/*
 			 * Clear fields corresponding to secure interrupt
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index fca3d0f..6fe3179 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -1083,10 +1083,13 @@
 	int64_t ret;
 
 	if (plat_ffa_vm_managed_exit_supported(current_vcpu->vm)) {
+		uint8_t pmr = plat_interrupts_get_priority_mask();
+
 		/* Mask all interrupts */
 		plat_interrupts_set_priority_mask(0x0);
 
 		current_locked = vcpu_lock(current_vcpu);
+		current_vcpu->priority_mask = pmr;
 		ret = api_interrupt_inject_locked(current_locked,
 						  HF_MANAGED_EXIT_INTID,
 						  current_vcpu, NULL);
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 891ec0e..b067079 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -759,8 +759,9 @@
 
 			CHECK(target_vcpu == current->preempted_vcpu);
 
-			/* Unmask interrupts. */
-			plat_interrupts_set_priority_mask(0xff);
+			/* Restore interrupt priority mask. */
+			plat_interrupts_set_priority_mask(
+				current->priority_mask);
 
 			/*
 			 * Clear fields corresponding to secure interrupt
@@ -879,6 +880,12 @@
  Message Processing	Signalable with ME	Signalable	Signalable
  Interrupt Handling	Queued			Queued		Queued
  --------------------------------------------------------------------------
+
+ * Note 1: TODO: The current design makes the assumption that the target vCPU
+ * of a secure interrupt is pinned to the same physical CPU on which the
+ * secure interrupt triggered. The target vCPU has to be resumed on the current
+ * CPU in order for it to service the virtual interrupt. This design limitation
+ * simplifies the interrupt management implementation in SPMC.
  */
 static struct vcpu_locked plat_ffa_secure_interrupt_prepare(
 	struct vcpu *current, uint32_t *int_id)
@@ -887,6 +894,7 @@
 	struct vcpu_locked target_vcpu_locked;
 	struct vcpu *target_vcpu;
 	uint32_t id;
+	uint8_t priority_mask;
 
 	/* Find pending interrupt id. This also activates the interrupt. */
 	id = plat_interrupts_get_pending_interrupt_id();
@@ -902,10 +910,14 @@
 	 * TODO: Temporarily mask all interrupts to disallow high priority
 	 * interrupts from pre-empting current interrupt processing.
 	 */
+	priority_mask = plat_interrupts_get_priority_mask();
 	plat_interrupts_set_priority_mask(0x0);
 
 	target_vcpu_locked = vcpu_lock(target_vcpu);
 
+	/* Save current value of priority mask. */
+	target_vcpu->priority_mask = priority_mask;
+
 	/*
 	 * TODO: Design limitation. Current implementation does not support
 	 * handling a secure interrupt while currently handling a secure
@@ -1112,8 +1124,8 @@
 	current->current_sec_interrupt_id = 0;
 	vcpu_unlock(&current_locked);
 
-	/* Unmask interrupts. */
-	plat_interrupts_set_priority_mask(0xff);
+	/* Restore interrupt priority mask. */
+	plat_interrupts_set_priority_mask(current->priority_mask);
 
 	*next = api_switch_to_other_world(current, other_world_ret,
 					  VCPU_STATE_WAITING);
@@ -1164,8 +1176,8 @@
 	sl_unlock(&target_vcpu->lock);
 	sl_unlock(&current->lock);
 
-	/* Unmask interrupts. */
-	plat_interrupts_set_priority_mask(0xff);
+	/* Restore interrupt priority mask. */
+	plat_interrupts_set_priority_mask(current->priority_mask);
 
 	/* The pre-empted vCPU should be run. */
 	*next = target_vcpu;