feat(interrupts): precedence in NS interrupt action

An SP execution context in a call chain may specify a less
permissive action for NS interrupts than subsequent SP execution
contexts in the same call chain. In that case, the less permissive
action takes precedence over the more permissive actions specified
by the subsequent execution contexts.

Order of increasing permissiveness:
NS_ACTION_QUEUED < NS_ACTION_ME < NS_ACTION_SIGNALED

Consequently, NS interrupts are masked prior to resuming a vCPU if
the effective action for NS interrupts is set to QUEUED.

This patch also saves and restores the interrupt priority mask as
required while winding and unwinding a call chain.

Change-Id: Ia5b6ba1ff61db245964175eb3680c05065eefee4
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h
index c328051..1395bcd 100644
--- a/inc/hf/vcpu.h
+++ b/inc/hf/vcpu.h
@@ -192,6 +192,12 @@
 	 */
 	uint8_t present_action_ns_interrupts;
 
+	/**
+	 * If the action in response to a non secure interrupt is to queue it,
+	 * this field is used to save and restore the current priority mask.
+	 */
+	uint8_t mask_ns_interrupts;
+
 	/** Partition Runtime Model. */
 	enum partition_runtime_model rt_model;
 };
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index da604cc..2fd7829 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -1017,6 +1017,8 @@
 	struct vcpu *current_vcpu = current();
 	int64_t ret;
 
+	assert(current_vcpu->vm->ns_interrupts_action != NS_ACTION_QUEUED);
+
 	if (plat_ffa_vm_managed_exit_supported(current_vcpu->vm)) {
 		uint8_t pmr = plat_interrupts_get_priority_mask();
 
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index c3de2d5..43f71e1 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -1904,6 +1904,13 @@
 
 	if (current_vcpu->call_chain.prev_node == NULL) {
 		/* End of NWd scheduled call chain */
+		if (current_vcpu->present_action_ns_interrupts ==
+		    NS_ACTION_QUEUED) {
+			/* Unmask non secure interrupts. */
+			plat_interrupts_set_priority_mask(
+				current_vcpu->mask_ns_interrupts);
+		}
+
 		return api_preempt(current_vcpu);
 	}
 
@@ -1922,6 +1929,9 @@
 	 * are not changed here.
 	 */
 
+	/* Restore action for Non secure interrupts. */
+	current_vcpu->present_action_ns_interrupts =
+		current_vcpu->vm->ns_interrupts_action;
 	vcpu_unlock(&current_vcpu_locked);
 
 	/* Lock next vcpu. */
@@ -2001,15 +2011,47 @@
 	CHECK(receiver_vcpu->call_chain.next_node == NULL);
 	CHECK(receiver_vcpu->rt_model == RTM_NONE);
 
+	/* Inherit action for non secure interrupts from partition. */
+	receiver_vcpu->present_action_ns_interrupts =
+		receiver_vcpu->vm->ns_interrupts_action;
 	receiver_vcpu->rt_model = RTM_FFA_DIR_REQ;
 
 	if (!vm_id_is_current_world(sender_vm_id)) {
 		/* Start of NWd scheduled call chain. */
 		receiver_vcpu->scheduling_mode = NWD_MODE;
+		if (receiver_vcpu->present_action_ns_interrupts ==
+		    NS_ACTION_QUEUED) {
+			uint8_t priority_mask;
+			/* Save current value of priority mask. */
+			priority_mask = plat_interrupts_get_priority_mask();
+			receiver_vcpu->mask_ns_interrupts = priority_mask;
+
+			/* Mask non secure interrupts. */
+			plat_interrupts_set_priority_mask(0x80);
+		}
 	} else {
 		/* Adding a new node to an existing call chain. */
 		vcpu_call_chain_extend(current, receiver_vcpu);
 		receiver_vcpu->scheduling_mode = current->scheduling_mode;
+
+		/* Precedence for NS-Interrupt actions. */
+		if (current->present_action_ns_interrupts <
+		    receiver_vcpu->present_action_ns_interrupts) {
+			/* A less permissive action is preferred. */
+			receiver_vcpu->present_action_ns_interrupts =
+				current->present_action_ns_interrupts;
+		}
+
+		if (receiver_vcpu->present_action_ns_interrupts ==
+		    NS_ACTION_QUEUED) {
+			uint8_t priority_mask;
+			/* Save current value of priority mask. */
+			priority_mask = plat_interrupts_get_priority_mask();
+			receiver_vcpu->mask_ns_interrupts = priority_mask;
+
+			/* Mask non secure interrupts. */
+			plat_interrupts_set_priority_mask(0x80);
+		}
 	}
 }
 
@@ -2032,9 +2074,23 @@
 	if (!vm_id_is_current_world(receiver_vm_id)) {
 		/* End of NWd scheduled call chain. */
 		assert(current->call_chain.prev_node == NULL);
+		if (current->present_action_ns_interrupts == NS_ACTION_QUEUED) {
+			/* Unmask non secure interrupts. */
+			plat_interrupts_set_priority_mask(
+				current->mask_ns_interrupts);
+		}
 	} else {
 		/* Removing a node from an existing call chain. */
 		vcpu_call_chain_remove_node(current, next);
+		if (current->present_action_ns_interrupts == NS_ACTION_QUEUED) {
+			/* Unmask non secure interrupts. */
+			plat_interrupts_set_priority_mask(
+				current->mask_ns_interrupts);
+		}
+
+		/* Restore action for Non secure interrupts. */
+		current->present_action_ns_interrupts =
+			current->vm->ns_interrupts_action;
 	}
 
 	sl_unlock(&next->lock);