refactor: boot protocol

Set the default vCPU runtime model to RTM_SP_INIT from the vcpu_init
function rather than from vcpu_reset.
Remove the implementation-defined is_bootstrapped and initialized flags
from the vCPU and VM contexts. Rely on the RTM_SP_INIT state, rather
than on those flags, to reflect that a vCPU is initializing.

Similarly, revisit the SP boot loop to consume the vCPU state.
Add an SPMC-level initialized flag indicating that the VMs' first vCPU
contexts have booted and the SPMC is initialized. This information is no
longer stored individually within each VM context.

Revisit conditions for FFA_MSG_WAIT, FFA_SECONDARY_EP_REGISTER,
FFA_MEM_PERM_SET/GET to rely on the vCPU RTM_SP_INIT state.

Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
Change-Id: I5af3b41b1f9d1121792636a37c2525d4bf22af2b
diff --git a/src/api.c b/src/api.c
index 2c6fa76..581e7bf 100644
--- a/src/api.c
+++ b/src/api.c
@@ -795,11 +795,15 @@
 
 	case VCPU_STATE_WAITING:
 		/*
-		 * An initial FFA_RUN is necessary for secondary VM/SP to reach
-		 * the message wait loop.
+		 * An initial FFA_RUN is necessary for SP's secondary vCPUs to
+		 * reach the message wait loop.
 		 */
-		if (!vcpu->is_bootstrapped) {
-			vcpu->is_bootstrapped = true;
+		if (vcpu->rt_model == RTM_SP_INIT) {
+			/*
+			 * TODO: this should be removed, but omitting it makes
+			 * normal world arch gicv3 tests fail.
+			 */
+			vcpu->rt_model = RTM_NONE;
 			break;
 		}
 
@@ -3241,7 +3245,7 @@
 					       struct vcpu *current)
 {
 	struct vm_locked vm_locked;
-	struct ffa_value ret = ffa_error(FFA_DENIED);
+	struct vcpu_locked current_locked;
 
 	/*
 	 * Reject if interface is not supported at this FF-A instance
@@ -3264,19 +3268,21 @@
 	 * address specified in the last valid invocation must be used by the
 	 * callee.
 	 */
-	vm_locked = vm_lock(current->vm);
-	if (vm_locked.vm->initialized) {
-		goto out;
+	current_locked = vcpu_lock(current);
+	if (current->rt_model != RTM_SP_INIT) {
+		dlog_error(
+			"FFA_SECONDARY_EP_REGISTER can only be called while "
+			"vCPU in run-time state for initialization.\n");
+		vcpu_unlock(&current_locked);
+		return ffa_error(FFA_DENIED);
 	}
+	vcpu_unlock(&current_locked);
 
+	vm_locked = vm_lock(current->vm);
 	vm_locked.vm->secondary_ep = entry_point;
-
-	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
-
-out:
 	vm_unlock(&vm_locked);
 
-	return ret;
+	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
 
 struct ffa_value api_ffa_notification_bitmap_create(ffa_vm_id_t vm_id,
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index c0bf68a..93991f1 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -437,7 +437,7 @@
 	case FFA_MSG_SEND_DIRECT_REQ_32: {
 		assert(vcpu != NULL);
 		/* Rule 1. */
-		if (vcpu->is_bootstrapped) {
+		if (vcpu->rt_model != RTM_SP_INIT) {
 			*next_state = VCPU_STATE_BLOCKED;
 			return true;
 		}
@@ -1045,13 +1045,13 @@
 bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
 {
 	/* FFA_MEM_PERM_SET/GET is only valid before SPs are initialized */
-	return has_vhe_support() && (current->vm->initialized == false);
+	return has_vhe_support() && (current->rt_model == RTM_SP_INIT);
 }
 
 bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
 {
 	/* FFA_MEM_PERM_SET/GET is only valid before SPs are initialized */
-	return has_vhe_support() && (current->vm->initialized == false);
+	return has_vhe_support() && (current->rt_model == RTM_SP_INIT);
 }
 
 /**
@@ -1185,9 +1185,7 @@
 
 	/* Check if a vCPU of SP is being resumed. */
 	if ((target_vm_id & HF_VM_ID_WORLD_MASK) != 0) {
-		if (!target_vcpu->is_bootstrapped) {
-			target_vcpu->rt_model = RTM_SP_INIT;
-		} else if (target_vcpu->processing_secure_interrupt) {
+		if (target_vcpu->processing_secure_interrupt) {
 			/*
 			 * Consider the following case: a secure interrupt
 			 * triggered in normal world and is targeted to an SP
@@ -1991,53 +1989,47 @@
 	return true;
 }
 
-static bool sp_boot_next(struct vcpu *current, struct vcpu **next,
-			 bool *boot_order_complete)
+static bool sp_boot_next(struct vcpu *current, struct vcpu **next)
 {
-	struct vm_locked current_vm_locked;
+	static bool spmc_booted = false;
 	struct vcpu *vcpu_next = NULL;
-	bool ret = false;
 
-	/*
-	 * If VM hasn't been initialized, initialize it and traverse
-	 * booting list following "next_boot" field in the VM structure.
-	 * Once all the SPs have been booted (when "next_boot" is NULL),
-	 * return execution to the NWd.
-	 */
-	current_vm_locked = vm_lock(current->vm);
-	if (current_vm_locked.vm->initialized == false) {
-		current_vm_locked.vm->initialized = true;
-		current->is_bootstrapped = true;
-		current->rt_model = RTM_NONE;
-		dlog_verbose("Initialized VM: %#x, boot_order: %u\n",
-			     current_vm_locked.vm->id,
-			     current_vm_locked.vm->boot_order);
-
-		vcpu_next = current->next_boot;
-		if (vcpu_next != NULL) {
-			/* Refer FF-A v1.1 Beta0 section 7.5 Rule 2. */
-			current->state = VCPU_STATE_WAITING;
-			CHECK(vcpu_next->vm->initialized == false);
-			*next = vcpu_next;
-			arch_regs_reset(*next);
-			(*next)->cpu = current->cpu;
-			(*next)->state = VCPU_STATE_RUNNING;
-			(*next)->regs_available = false;
-			(*next)->rt_model = RTM_SP_INIT;
-
-			vm_set_boot_info_gp_reg(vcpu_next->vm, vcpu_next);
-
-			ret = true;
-			goto out;
-		}
-
-		*boot_order_complete = true;
-		dlog_verbose("Finished initializing all VMs.\n");
+	if (spmc_booted) {
+		return false;
 	}
 
-out:
-	vm_unlock(&current_vm_locked);
-	return ret;
+	assert(current->rt_model == RTM_SP_INIT);
+
+	/* vCPU has just returned from initialization. */
+	dlog_notice("Initialized VM: %#x, boot_order: %u\n", current->vm->id,
+		    current->vm->boot_order);
+
+	/*
+	 * Pick next vCPU to be booted. Once all SPs have booted
+	 * (next_boot is NULL), then return execution to NWd.
+	 */
+	vcpu_next = current->next_boot;
+	if (vcpu_next == NULL) {
+		dlog_notice("Finished initializing all VMs.\n");
+		spmc_booted = true;
+		return false;
+	}
+
+	current->state = VCPU_STATE_WAITING;
+	current->rt_model = RTM_NONE;
+	current->scheduling_mode = NONE;
+
+	CHECK(vcpu_next->rt_model == RTM_SP_INIT);
+	arch_regs_reset(vcpu_next);
+	vcpu_next->cpu = current->cpu;
+	vcpu_next->state = VCPU_STATE_RUNNING;
+	vcpu_next->regs_available = false;
+	vcpu_set_phys_core_idx(vcpu_next);
+	vm_set_boot_info_gp_reg(vcpu_next->vm, vcpu_next);
+
+	*next = vcpu_next;
+
+	return true;
 }
 
 static void plat_ffa_signal_interrupt_args(struct ffa_value *args, uint32_t id)
@@ -2170,7 +2162,7 @@
 
 /**
  * The invocation of FFA_MSG_WAIT at secure virtual FF-A instance is compliant
- * with FF-A v1.1 EAC0 specification. It only performs the  state transition
+ * with FF-A v1.1 EAC0 specification. It only performs the state transition
  * from RUNNING to WAITING for the following Partition runtime models:
  * RTM_FFA_RUN, RTM_SEC_INTERRUPT, RTM_SP_INIT.
  */
@@ -2179,17 +2171,8 @@
 {
 	struct ffa_value ret_args =
 		(struct ffa_value){.func = FFA_INTERRUPT_32};
-	bool boot_order_complete = false;
 
-	if (sp_boot_next(current, next, &boot_order_complete)) {
-		return ret_args;
-	}
-
-	/* All the SPs have been booted now. Return to NWd. */
-	if (boot_order_complete) {
-		*next = api_switch_to_other_world(
-			current, (struct ffa_value){.func = FFA_MSG_WAIT_32},
-			VCPU_STATE_WAITING);
+	if (sp_boot_next(current, next)) {
 		return ret_args;
 	}
 
diff --git a/src/vcpu.c b/src/vcpu.c
index ba56e7b..37561a7 100644
--- a/src/vcpu.c
+++ b/src/vcpu.c
@@ -68,6 +68,7 @@
 	vcpu->state = VCPU_STATE_OFF;
 	vcpu->direct_request_origin_vm_id = HF_INVALID_VM_ID;
 	vcpu->present_action_ns_interrupts = NS_ACTION_INVALID;
+	vcpu->rt_model = RTM_SP_INIT;
 	vcpu->next_boot = NULL;
 }
 
@@ -203,7 +204,6 @@
 
 	/* Reset the registers to give a clean start for vCPU. */
 	arch_regs_reset(vcpu);
-	vcpu->rt_model = RTM_SP_INIT;
 }
 
 void vcpu_set_phys_core_idx(struct vcpu *vcpu)
diff --git a/src/vm.c b/src/vm.c
index e4a4252..2b4a3a7 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -1007,7 +1007,7 @@
  */
 void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
 {
-	if (!vm->initialized && vm->boot_info.blob_addr.ipa != 0U) {
+	if (vm->boot_info.blob_addr.ipa != 0U) {
 		arch_regs_set_gp_reg(&vcpu->regs,
 				     ipa_addr(vm->boot_info.blob_addr),
 				     vm->boot_info.gp_register_num);