feat(scheduling mode): initialize scheduling mode for FFA_RUN

This patch initializes the scheduling mode and partition runtime
model of the target SP's vCPU when it is resumed via FFA_RUN.
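
For reference, below is a minimal, self-contained sketch of the decision
logic this patch adds to spmc.c. The enum value names (NONE, NWD_MODE,
SPMC_MODE, RTM_NONE, RTM_SP_INIT, RTM_FFA_RUN, VCPU_STATE_*) mirror the
ones used in the patch; toy_vcpu, is_secure and
init_schedule_mode_ffa_run() are hypothetical stand-ins for the real
Hafnium types and hook, locking is omitted, and assert() stands in for
CHECK().

  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-in enums reusing the value names from this patch. */
  enum vcpu_state {
      VCPU_STATE_WAITING,
      VCPU_STATE_RUNNING,
      VCPU_STATE_PREEMPTED,
      VCPU_STATE_BLOCKED,
  };
  enum schedule_mode { NONE, NWD_MODE, SPMC_MODE };
  enum rt_model { RTM_NONE, RTM_SP_INIT, RTM_FFA_RUN };

  /* Hypothetical, simplified stand-in for struct vcpu. */
  struct toy_vcpu {
      enum vcpu_state state;
      enum schedule_mode scheduling_mode;
      enum rt_model rt_model;
      bool is_secure; /* Models the vm_id_is_current_world() check. */
  };

  /* Models the transitions made on the target vCPU for FFA_RUN. */
  static void init_schedule_mode_ffa_run(struct toy_vcpu *caller,
                                         struct toy_vcpu *target)
  {
      /* FFA_RUN never resumes a vCPU in SPMC scheduled mode. */
      assert(target->scheduling_mode != SPMC_MODE);

      if (target->state != VCPU_STATE_WAITING) {
          /* A preempted or blocked vCPU keeps its current mode. */
          assert(target->state == VCPU_STATE_PREEMPTED ||
                 target->state == VCPU_STATE_BLOCKED);
          return;
      }

      if (target->rt_model == RTM_SP_INIT) {
          /* Resumed to finish initialization: no mode assigned. */
          target->scheduling_mode = NONE;
      } else if (target->rt_model == RTM_NONE) {
          target->rt_model = RTM_FFA_RUN;
          /* NWd caller, or an SP already in NWd scheduled mode. */
          if (!caller->is_secure ||
              caller->scheduling_mode == NWD_MODE) {
              target->scheduling_mode = NWD_MODE;
          }
      } else {
          assert(false); /* Any other runtime model is unexpected. */
      }
  }

  int main(void)
  {
      struct toy_vcpu nwd = { VCPU_STATE_RUNNING, NONE, RTM_NONE, false };
      struct toy_vcpu sp = { VCPU_STATE_WAITING, NONE, RTM_NONE, true };

      /* A NWd caller running the SP vCPU puts it in NWd scheduled mode. */
      init_schedule_mode_ffa_run(&nwd, &sp);
      printf("mode=%d rt_model=%d\n", sp.scheduling_mode, sp.rt_model);
      return 0;
  }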

Change-Id: I46346e96fe55e7d9be598996380c1877061fd0d6
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
diff --git a/inc/hf/arch/plat/ffa.h b/inc/hf/arch/plat/ffa.h
index 540634b..4853f97 100644
--- a/inc/hf/arch/plat/ffa.h
+++ b/inc/hf/arch/plat/ffa.h
@@ -301,6 +301,9 @@
 					     struct vcpu *vcpu, uint32_t func,
 					     enum vcpu_state *next_state);
 
+void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
+					 struct vcpu_locked target_locked);
+
 void plat_ffa_wind_call_chain_ffa_direct_req(
 	struct vcpu_locked current_locked,
 	struct vcpu_locked receiver_vcpu_locked);
diff --git a/src/api.c b/src/api.c
index e61c7a6..557227e 100644
--- a/src/api.c
+++ b/src/api.c
@@ -920,6 +920,8 @@
 		goto out;
 	}
 
+	plat_ffa_init_schedule_mode_ffa_run(current, vcpu_locked);
+
 	/* It has been decided that the vCPU should be run. */
 	vcpu->cpu = current->cpu;
 	vcpu->state = VCPU_STATE_RUNNING;
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index be8c458..4bab99c 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -471,6 +471,13 @@
 	return true;
 }
 
+void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
+					 struct vcpu_locked target_locked)
+{
+	(void)current;
+	(void)target_locked;
+}
+
 void plat_ffa_wind_call_chain_ffa_direct_req(
 	struct vcpu_locked current_locked,
 	struct vcpu_locked receiver_vcpu_locked)
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 467a37a..fc1548a 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -974,6 +974,14 @@
 	}
 }
 
+void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
+					 struct vcpu_locked target_locked)
+{
+	/* Scheduling modes are not supported in the Hypervisor/VMs. */
+	(void)current;
+	(void)target_locked;
+}
+
 void plat_ffa_wind_call_chain_ffa_direct_req(
 	struct vcpu_locked current_locked,
 	struct vcpu_locked receiver_vcpu_locked)
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index dff129d..f30c34f 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -1877,6 +1877,48 @@
 }
 
 /*
+ * Initialize the scheduling mode and/or partition runtime model of the target
+ * SP when it is resumed through the FFA_RUN ABI.
+ */
+void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
+					 struct vcpu_locked target_locked)
+{
+	struct vcpu_locked current_vcpu_locked;
+	struct vcpu *vcpu = target_locked.vcpu;
+
+	/* Lock the current vCPU; its mode and VM ID are read below. */
+	current_vcpu_locked = vcpu_lock(current);
+
+	/*
+	 * Scenario 1 in Table 8.4 of the FF-A v1.1 EAC0 spec: the SPMC may be
+	 * resuming a vCPU that was in NWd scheduled mode, never SPMC mode.
+	 */
+	CHECK(vcpu->scheduling_mode != SPMC_MODE);
+
+	/* Section 8.2.3, bullet 4.2 of the FF-A v1.1 EAC0 spec. */
+	if (vcpu->state == VCPU_STATE_WAITING) {
+		if (vcpu->rt_model == RTM_SP_INIT) {
+			vcpu->scheduling_mode = NONE;
+		} else if (vcpu->rt_model == RTM_NONE) {
+			vcpu->rt_model = RTM_FFA_RUN;
+
+			if (!vm_id_is_current_world(current->vm->id) ||
+			    (current->scheduling_mode == NWD_MODE)) {
+				vcpu->scheduling_mode = NWD_MODE;
+			}
+		} else {
+			CHECK(false);
+		}
+	} else {
+		/* The SP vCPU must have been preempted or blocked earlier. */
+		CHECK(vcpu->state == VCPU_STATE_PREEMPTED ||
+		      vcpu->state == VCPU_STATE_BLOCKED);
+	}
+
+	vcpu_unlock(&current_vcpu_locked);
+}
+
+/*
  * Start winding the call chain or continue to wind the present one upon the
  * invocation of FFA_MSG_SEND_DIRECT_REQ ABI.
  */
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 2983ef3..c41169b 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -441,6 +441,14 @@
 	return true;
 }
 
+void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
+					 struct vcpu_locked target_locked)
+{
+	/* Scheduling modes are not supported in the Hypervisor/VMs. */
+	(void)current;
+	(void)target_locked;
+}
+
 void plat_ffa_wind_call_chain_ffa_direct_req(
 	struct vcpu_locked current_locked,
 	struct vcpu_locked receiver_vcpu_locked)