feat: boot secondary vCPU of SP upon CPU_ON pwr mgmt event

When a secondary CPU is turned on, SPMC receives the PSCI CPU_ON
power management message from SPMD. Currently, SPMC only resumes
the pinned vCPU of the first SP (based on boot order) to allow
it to initialize itself. Once the vCPU has initialized, SPMC
relinquishes CPU cycles to the normal world.

This patch implements support for the secondary boot protocol. Each
pinned execution context of every MP SP is woken up by the SPMC, thereby
giving the SP's vCPU on the secondary CPU an opportunity to initialize
itself.

Note that if a system does not have MP SPs, then there are no pinned
execution contexts on secondary CPUs.

Change-Id: I395d0456595dd904bcb63a6d62447f07e044b812
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index bf411f2..5edf031 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -306,6 +306,63 @@
 }
 
 #if SECURE_WORLD == 1
+/*
+ * TODO: the power management event currently terminates at the SPMC. In a
+ * later iteration, the event can be forwarded to the SP by resuming it.
+ */
+static struct ffa_value handle_psci_framework_msg(struct ffa_value *args,
+						  struct vcpu *current)
+{
+	enum psci_return_code psci_msg_response;
+	uint64_t psci_func = args->arg3;
+
+	switch (psci_func) {
+	case PSCI_CPU_OFF: {
+		/*
+		 * Mark all the vCPUs pinned on this CPU as OFF. Note that the
+		 * vCPU of a UP SP is not turned off since SPMC can migrate it
+		 * to an online CPU when needed.
+		 */
+		for (ffa_vm_count_t index = 0; index < vm_get_count();
+		     ++index) {
+			struct vm *vm = vm_find_index(index);
+
+			if (vm->vcpu_count > 1) {
+				struct vcpu *vcpu;
+				struct vcpu_locked vcpu_locked;
+
+				vcpu = vm_get_vcpu(vm, cpu_index(current->cpu));
+				vcpu_locked = vcpu_lock(vcpu);
+				vcpu->state = VCPU_STATE_OFF;
+				vcpu_unlock(&vcpu_locked);
+				dlog_verbose("SP%u turned OFF on CPU%zu\n",
+					     vm->id, cpu_index(current->cpu));
+			}
+		}
+
+		/*
+		 * Mark the CPU as turned off and reset the field that tracks
+		 * whether all the pinned vCPUs have been booted on this CPU.
+		 */
+		cpu_off(current->cpu);
+		current->cpu->last_sp_initialized = false;
+		psci_msg_response = PSCI_RETURN_SUCCESS;
+
+		break;
+	}
+	default:
+		dlog_error(
+			"FF-A PSCI framework message not handled "
+			"%#lx %#lx %#lx %#lx\n",
+			args->func, args->arg1, args->arg2, args->arg3);
+		psci_msg_response = PSCI_ERROR_NOT_SUPPORTED;
+	}
+
+	return ffa_framework_msg_resp(HF_SPMC_VM_ID, HF_SPMD_VM_ID,
+				      SPMD_FRAMEWORK_MSG_PSCI_RESP,
+				      psci_msg_response);
+}
+
 /**
  * Handle special direct messages from SPMD to SPMC.
  */
@@ -335,48 +392,7 @@
 
 	switch (func) {
 	case SPMD_FRAMEWORK_MSG_PSCI_REQ: {
-		enum psci_return_code psci_msg_response =
-			PSCI_ERROR_NOT_SUPPORTED;
-		struct vm *vm = vm_get_boot_vm();
-		struct vcpu_locked vcpu_locked;
-
-		/*
-		 * TODO: the power management event reached the SPMC.
-		 * In a later iteration, the power management event can
-		 * be passed to the SP by resuming it.
-		 */
-		switch (args->arg3) {
-		case PSCI_CPU_OFF: {
-			if (vm_power_management_cpu_off_requested(vm) == true) {
-				struct vcpu *vcpu;
-
-				/* Allow only S-EL1 MP SPs to reach here. */
-				CHECK(vm->el0_partition == false);
-				CHECK(vm->vcpu_count > 1);
-
-				vcpu = vm_get_vcpu(vm, vcpu_index(current));
-				vcpu_locked = vcpu_lock(vcpu);
-				vcpu->state = VCPU_STATE_OFF;
-				vcpu_unlock(&vcpu_locked);
-				cpu_off(vcpu->cpu);
-				dlog_verbose("cpu%u off notification!\n",
-					     vcpu_index(vcpu));
-			}
-
-			psci_msg_response = PSCI_RETURN_SUCCESS;
-			break;
-		}
-		default:
-			dlog_error(
-				"FF-A PSCI framework message not handled "
-				"%#lx %#lx %#lx %#lx\n",
-				args->func, args->arg1, args->arg2, args->arg3);
-			psci_msg_response = PSCI_ERROR_NOT_SUPPORTED;
-		}
-
-		*args = ffa_framework_msg_resp(HF_SPMC_VM_ID, HF_SPMD_VM_ID,
-					       SPMD_FRAMEWORK_MSG_PSCI_RESP,
-					       psci_msg_response);
+		*args = handle_psci_framework_msg(args, current);
 		return true;
 	}
 	case SPMD_FRAMEWORK_MSG_FFA_VERSION_REQ: {
diff --git a/src/arch/aarch64/plat/psci/spmc.c b/src/arch/aarch64/plat/psci/spmc.c
index c2b8653..2da5c27 100644
--- a/src/arch/aarch64/plat/psci/spmc.c
+++ b/src/arch/aarch64/plat/psci/spmc.c
@@ -54,15 +54,73 @@
 	(void)power_state;
 }
 
+/** Switch to the normal world vCPU pinned on this physical CPU now. */
+static struct vcpu *plat_psci_switch_to_other_world(struct cpu *c)
+{
+	struct vcpu_locked other_world_vcpu_locked;
+	struct vm *other_world_vm = vm_find(HF_OTHER_WORLD_ID);
+	struct vcpu *other_world_vcpu;
+
+	CHECK(other_world_vm != NULL);
+
+	other_world_vcpu = vm_get_vcpu(other_world_vm, cpu_index(c));
+
+	CHECK(other_world_vcpu != NULL);
+
+	other_world_vcpu_locked = vcpu_lock(other_world_vcpu);
+
+	/*
+	 * Return FFA_MSG_WAIT_32 to indicate to SPMD that SPMC
+	 * has successfully finished initialization on this
+	 * CPU.
+	 */
+	arch_regs_set_retval(&other_world_vcpu->regs,
+			     (struct ffa_value){.func = FFA_MSG_WAIT_32});
+
+	other_world_vcpu->state = VCPU_STATE_WAITING;
+	vcpu_unlock(&other_world_vcpu_locked);
+
+	return other_world_vcpu;
+}
+
+/**
+ * Check if there is at least one SP whose execution context needs to be
+ * bootstrapped on this physical CPU.
+ */
+static struct vm *plat_psci_get_boot_vm(struct cpu *c)
+{
+	struct vm *boot_vm;
+
+	if (cpu_index(c) == PRIMARY_CPU_IDX) {
+		boot_vm = vm_get_boot_vm();
+
+		/*
+		 * On the primary CPU, at least one SP will exist whose
+		 * execution context shall be bootstrapped.
+		 */
+		CHECK(boot_vm != NULL);
+	} else {
+		boot_vm = vm_get_boot_vm_secondary_core();
+
+		/*
+		 * It is possible that no SP exists that needs its execution
+		 * context to be bootstrapped on this secondary CPU. This
+		 * can happen if all the SPs in the system are UP partitions and
+		 * hence, have no vCPUs pinned to secondary CPUs.
+		 */
+		if (boot_vm != NULL) {
+			assert(boot_vm->vcpu_count > 1);
+		}
+	}
+
+	return boot_vm;
+}
+
 struct vcpu *plat_psci_cpu_resume(struct cpu *c)
 {
 	struct vcpu_locked vcpu_locked;
-	struct vcpu_locked other_world_vcpu_locked;
-	struct vcpu *vcpu;
-	struct vm *vm = vm_get_boot_vm();
-	struct vm *other_world_vm;
-	struct vcpu *other_world_vcpu;
-	struct two_vcpu_locked vcpus_locked;
+	struct vm *boot_vm;
+	struct vcpu *boot_vcpu;
 
 	cpu_on(c);
 
@@ -71,47 +129,42 @@
 	/* Initialize SRI for running core. */
 	ffa_notifications_sri_init(c);
 
-	vcpu = vm_get_vcpu(vm, vm_is_up(vm) ? 0 : cpu_index(c));
+	boot_vm = plat_psci_get_boot_vm(c);
 
-	assert(vcpu != NULL);
-
-	vcpu_locked = vcpu_lock(vcpu);
-
-	if (vcpu->rt_model != RTM_SP_INIT) {
-		other_world_vm = vm_find(HF_OTHER_WORLD_ID);
-		CHECK(other_world_vm != NULL);
-		other_world_vcpu = vm_get_vcpu(other_world_vm, cpu_index(c));
-		vcpu_unlock(&vcpu_locked);
-
-		/* Lock both vCPUs at once to avoid deadlock. */
-		vcpus_locked = vcpu_lock_both(vcpu, other_world_vcpu);
-		vcpu_locked = vcpus_locked.vcpu1;
-		other_world_vcpu_locked = vcpus_locked.vcpu2;
-
-		vcpu = api_switch_to_other_world(
-			other_world_vcpu_locked,
-			(struct ffa_value){.func = FFA_MSG_WAIT_32},
-			VCPU_STATE_WAITING);
-		vcpu_unlock(&other_world_vcpu_locked);
-		goto exit;
+	if (boot_vm == NULL) {
+		return plat_psci_switch_to_other_world(c);
 	}
 
-	vcpu->cpu = c;
+	/* Obtain the vCPU for the boot SP on this CPU. */
+	boot_vcpu = vm_get_vcpu(boot_vm, cpu_index(c));
 
-	vcpu_secondary_reset_and_start(vcpu_locked, vcpu->vm->secondary_ep,
+	/* Lock the vCPU to update its fields. */
+	vcpu_locked = vcpu_lock(boot_vcpu);
+
+	/* Pin the vCPU to this CPU. */
+	boot_vcpu->cpu = c;
+
+	vcpu_secondary_reset_and_start(vcpu_locked, boot_vcpu->vm->secondary_ep,
 				       0ULL);
+
+	/* Set the vCPU's state to RUNNING. */
 	vcpu_set_running(vcpu_locked, NULL);
 
 	/* vCPU restarts in runtime model for SP initialization. */
-	vcpu->rt_model = RTM_SP_INIT;
+	boot_vcpu->rt_model = RTM_SP_INIT;
 
 	/* Set the designated GP register with the core linear id. */
-	vcpu_set_phys_core_idx(vcpu);
+	vcpu_set_phys_core_idx(boot_vcpu);
 
-	vcpu_set_boot_info_gp_reg(vcpu);
+	if (cpu_index(c) == PRIMARY_CPU_IDX) {
+		/*
+		 * Boot information is passed by the SPMC to the SP's execution
+		 * context only on the primary CPU.
+		 */
+		vcpu_set_boot_info_gp_reg(boot_vcpu);
+	}
 
-exit:
 	vcpu_unlock(&vcpu_locked);
 
-	return vcpu;
+	return boot_vcpu;
 }
diff --git a/src/ffa/spmc.c b/src/ffa/spmc.c
index ba325d6..c661d90 100644
--- a/src/ffa/spmc.c
+++ b/src/ffa/spmc.c
@@ -353,54 +353,79 @@
 
 bool sp_boot_next(struct vcpu_locked current_locked, struct vcpu **next)
 {
-	static bool spmc_booted = false;
 	struct vcpu *vcpu_next = NULL;
 	struct vcpu *current = current_locked.vcpu;
 	struct vm *next_vm;
 	size_t cpu_indx = cpu_index(current->cpu);
 
-	if (spmc_booted) {
+	if (current->cpu->last_sp_initialized) {
 		return false;
 	}
 
-	assert(current->rt_model == RTM_SP_INIT);
-
 	if (!atomic_load_explicit(&current->vm->aborting,
 				  memory_order_relaxed)) {
 		/* vCPU has just returned from successful initialization. */
-		dlog_info("Initialized VM: %#x, boot_order: %u\n",
-			  current->vm->id, current->vm->boot_order);
+		dlog_verbose(
+			"Initialized execution context of VM: %#x on CPU: %zu, "
+			"boot_order: %u\n",
+			current->vm->id, cpu_index(current->cpu),
+			current->vm->boot_order);
+	}
+
+	if (cpu_index(current_locked.vcpu->cpu) == PRIMARY_CPU_IDX) {
+		next_vm = vm_get_next_boot(current->vm);
+	} else {
+		/* SP boot chain on secondary CPU. */
+		next_vm = vm_get_next_boot_secondary_core(current->vm);
 	}
 
 	current->state = VCPU_STATE_WAITING;
-
-	/*
-	 * Pick next SP's vCPU to be booted. Once all SPs have booted
-	 * (next_boot is NULL), then return execution to NWd.
-	 */
-	next_vm = vm_get_next_boot(current->vm);
-
-	if (next_vm == NULL) {
-		dlog_notice("Finished initializing all VMs.\n");
-		spmc_booted = true;
-		return false;
-	}
-
 	current->rt_model = RTM_NONE;
 	current->scheduling_mode = NONE;
 
+	/*
+	 * Pick next SP's vCPU to be booted. Once all SPs have booted
+	 * (next_vm is NULL), then return execution to NWd.
+	 */
+	if (next_vm == NULL) {
+		current->cpu->last_sp_initialized = true;
+		goto out;
+	}
+
 	vcpu_next = vm_get_vcpu(next_vm, cpu_indx);
-	CHECK(vcpu_next->rt_model == RTM_SP_INIT);
-	arch_regs_reset(vcpu_next);
-	vcpu_next->cpu = current->cpu;
-	vcpu_next->state = VCPU_STATE_RUNNING;
-	vcpu_next->regs_available = false;
-	vcpu_set_phys_core_idx(vcpu_next);
-	vcpu_set_boot_info_gp_reg(vcpu_next);
 
-	*next = vcpu_next;
+	/*
+	 * An SP's execution context needs to be bootstrapped if:
+	 * - It has never been initialized before.
+	 * - Or it was turned off when the CPU, on which it was pinned, was
+	 *   powered down.
+	 */
+	if (vcpu_next->rt_model == RTM_SP_INIT ||
+	    vcpu_next->state == VCPU_STATE_OFF) {
+		vcpu_next->rt_model = RTM_SP_INIT;
+		arch_regs_reset(vcpu_next);
+		vcpu_next->cpu = current->cpu;
+		vcpu_next->state = VCPU_STATE_RUNNING;
+		vcpu_next->regs_available = false;
+		vcpu_set_phys_core_idx(vcpu_next);
+		arch_regs_set_pc_arg(&vcpu_next->regs,
+				     vcpu_next->vm->secondary_ep, 0ULL);
 
-	return true;
+		if (cpu_index(current_locked.vcpu->cpu) == PRIMARY_CPU_IDX) {
+			/*
+			 * Boot information is passed by the SPMC to the SP's
+			 * execution context only on the primary CPU.
+			 */
+			vcpu_set_boot_info_gp_reg(vcpu_next);
+		}
+
+		*next = vcpu_next;
+
+		return true;
+	}
+out:
+	dlog_notice("Finished bootstrapping all SPs on CPU%lx\n", cpu_indx);
+	return false;
 }
 
 /**
diff --git a/src/ffa/spmc/cpu_cycles.c b/src/ffa/spmc/cpu_cycles.c
index db152db..06b4698 100644
--- a/src/ffa/spmc/cpu_cycles.c
+++ b/src/ffa/spmc/cpu_cycles.c
@@ -97,8 +97,6 @@
 		goto out;
 	}
 
-	vcpu_secondary_reset_and_start(target_locked, vm->secondary_ep, 0);
-
 	if (vm_id_is_current_world(current->vm->id)) {
 		/*
 		 * Refer FF-A v1.1 EAC0 spec section 8.3.2.2.1