test(rmm): add testcase for multiple rec on multiple cpu

The test creates and enters 8 RECs on different CPUs.
Exercises CPU_ON, CPU_OFF and PSCI_AFFINITY_INFO
from realm.

Signed-off-by: Shruti Gupta <shruti.gupta@arm.com>
Change-Id: Iecc58ea79bfde28f307d1df99680d707e57a1d80
diff --git a/include/runtime_services/host_realm_managment/host_shared_data.h b/include/runtime_services/host_realm_managment/host_shared_data.h
index 57af48d..8549512 100644
--- a/include/runtime_services/host_realm_managment/host_shared_data.h
+++ b/include/runtime_services/host_realm_managment/host_shared_data.h
@@ -45,6 +45,7 @@
 	REALM_SLEEP_CMD = 1U,
 	REALM_LOOP_CMD,
 	REALM_MULTIPLE_REC_PSCI_DENIED_CMD,
+	REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
 	REALM_GET_RSI_VERSION,
 	REALM_PMU_CYCLE,
 	REALM_PMU_EVENT,
diff --git a/realm/include/realm_tests.h b/realm/include/realm_tests.h
index 5caea8c..3016a4d 100644
--- a/realm/include/realm_tests.h
+++ b/realm/include/realm_tests.h
@@ -23,6 +23,7 @@
 bool test_realm_sve_cmp_regs(void);
 bool test_realm_sve_undef_abort(void);
 bool test_realm_multiple_rec_psci_denied_cmd(void);
+bool test_realm_multiple_rec_multiple_cpu_cmd(void);
 bool test_realm_sme_read_id_registers(void);
 bool test_realm_sme_undef_abort(void);
 
diff --git a/realm/realm_multiple_rec.c b/realm/realm_multiple_rec.c
index abd166b..c584cd4 100644
--- a/realm/realm_multiple_rec.c
+++ b/realm/realm_multiple_rec.c
@@ -22,6 +22,7 @@
 
 #define CXT_ID_MAGIC 0x100
 static uint64_t is_secondary_cpu_booted;
+static spinlock_t lock;
 
 static void rec1_handler(u_register_t cxt_id)
 {
@@ -31,7 +32,9 @@
 		realm_printf("Wrong cxt_id\n");
 		rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
 	}
+	spin_lock(&lock);
 	is_secondary_cpu_booted++;
+	spin_unlock(&lock);
 	realm_cpu_off();
 }
 
@@ -67,3 +70,48 @@
 	}
 	return true;
 }
+
+bool test_realm_multiple_rec_multiple_cpu_cmd(void)
+{
+	unsigned int i = 1U, rec_count;
+	u_register_t ret;
+
+	realm_printf("Realm: running on CPU = 0x%lx\n", read_mpidr_el1() & MPID_MASK);
+	rec_count = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+
+	/* Check CPU_ON is supported */
+	ret = realm_psci_features(SMC_PSCI_CPU_ON);
+	if (ret != PSCI_E_SUCCESS) {
+		realm_printf("SMC_PSCI_CPU_ON not supported\n");
+		return false;
+	}
+
+	for (unsigned int j = 1U; j < rec_count; j++) {
+		ret = realm_cpu_on(j, (uintptr_t)rec1_handler, CXT_ID_MAGIC + j);
+		if (ret != PSCI_E_SUCCESS) {
+			realm_printf("SMC_PSCI_CPU_ON failed %u.\n", j);
+			return false;
+		}
+	}
+
+	/* Exit to host to allow host to run all CPUs */
+	rsi_exit_to_host(HOST_CALL_EXIT_SUCCESS_CMD);
+	/* wait for all CPUs to come up */
+	while (is_secondary_cpu_booted != rec_count - 1U) {
+		waitms(200);
+	}
+
+	/* wait for all CPUs to turn off */
+	while (i < rec_count) {
+		ret = realm_psci_affinity_info(i, MPIDR_AFFLVL0);
+		if (ret != PSCI_STATE_OFF) {
+			/* wait and query again */
+			realm_printf(" CPU %u is not off\n", i);
+			waitms(200);
+			continue;
+		}
+		i++;
+	}
+	realm_printf("All CPU are off\n");
+	return true;
+}
diff --git a/realm/realm_payload_main.c b/realm/realm_payload_main.c
index 5c488ee..ddaa3cb 100644
--- a/realm/realm_payload_main.c
+++ b/realm/realm_payload_main.c
@@ -156,6 +156,9 @@
 			break;
 		case REALM_MULTIPLE_REC_PSCI_DENIED_CMD:
 			test_succeed = test_realm_multiple_rec_psci_denied_cmd();
+			break;
+		case REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD:
+			test_succeed = test_realm_multiple_rec_multiple_cpu_cmd();
 			break;
 		case REALM_PAUTH_SET_CMD:
 			test_succeed = test_realm_pauth_set_cmd();
diff --git a/realm/realm_psci.c b/realm/realm_psci.c
index 2a5b951..a4a287b 100644
--- a/realm/realm_psci.c
+++ b/realm/realm_psci.c
@@ -74,15 +74,16 @@
 
 void realm_secondary_entrypoint(u_register_t cxt_id)
 {
-	u_register_t my_mpidr;
+	u_register_t my_mpidr, id;
 	secondary_ep_t ep;
 
 	my_mpidr = read_mpidr_el1() & MPID_MASK;
 	ep = entrypoint[my_mpidr];
+	id = context_id[my_mpidr];
 	if (ep != NULL) {
 		entrypoint[my_mpidr] = NULL;
 		context_id[my_mpidr] = 0;
-		(ep)(context_id[my_mpidr]);
+		(ep)(id);
 	} else {
 		/*
 		 * Host can execute Rec directly without CPU_ON
diff --git a/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c b/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c
index b840beb..dd4bd5d 100644
--- a/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c
+++ b/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c
@@ -278,3 +278,164 @@
 
 	return host_cmp_result();
 }
+
+
+static test_result_t cpu_on_handler(void)
+{
+	bool ret;
+	struct rmi_rec_run *run;
+	unsigned int i;
+
+	spin_lock(&secondary_cpu_lock);
+	i = ++is_secondary_cpu_on;
+	spin_unlock(&secondary_cpu_lock);
+	ret = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+			RMI_EXIT_PSCI, i);
+	if (ret) {
+		run = (struct rmi_rec_run *)realm.run[i];
+		if (run->exit.gprs[0] == SMC_PSCI_CPU_OFF) {
+			return TEST_RESULT_SUCCESS;
+		}
+	}
+	ERROR("Rec %u failed\n", i);
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * The test creates a realm with MAX recs
+ * On receiving PSCI_CPU_ON call from REC0 for all other recs,
+ * the test completes the PSCI call and re-enters REC0.
+ * Turn ON secondary CPUs upto a max of MAX_REC_COUNT.
+ * Each of the secondary then enters Realm with a different REC
+ * and executes the test REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD in Realm payload.
+ * It is expected that the REC will exit with PSCI_CPU_OFF as the exit reason.
+ * REC00 checks if all other CPUs are off, via PSCI_AFFINITY_INFO.
+ * Host completes the PSCI requests.
+ */
+test_result_t host_realm_multi_rec_multiple_cpu(void)
+{
+	bool ret1, ret2;
+	test_result_t ret3 = TEST_RESULT_FAIL;
+	int ret = RMI_ERROR_INPUT;
+	u_register_t rec_num;
+	u_register_t other_mpidr, my_mpidr;
+	struct rmi_rec_run *run;
+	unsigned int host_call_result, i = 0U;
+	u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+		RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+		RMI_NOT_RUNNABLE};
+	u_register_t exit_reason;
+	int cpu_node;
+
+	SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(MAX_REC_COUNT);
+
+	if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+			(u_register_t)PAGE_POOL_BASE,
+			(u_register_t)PAGE_POOL_MAX_SIZE,
+			0UL, rec_flag, MAX_REC_COUNT)) {
+		return TEST_RESULT_FAIL;
+	}
+	if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+			NS_REALM_SHARED_MEM_SIZE)) {
+		return TEST_RESULT_FAIL;
+	}
+
+	is_secondary_cpu_on = 0U;
+	init_spinlock(&secondary_cpu_lock);
+	my_mpidr = read_mpidr_el1() & MPID_MASK;
+	host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, MAX_REC_COUNT);
+	ret1 = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+			RMI_EXIT_PSCI, 0U);
+	if (!ret1) {
+		ERROR("Host did not receive CPU ON request\n");
+		goto destroy_realm;
+	}
+	while (true) {
+		run = (struct rmi_rec_run *)realm.run[0];
+		if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+			ERROR("Host did not receive CPU ON request\n");
+			goto destroy_realm;
+		}
+		rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+		if (rec_num >= MAX_REC_COUNT) {
+			ERROR("Invalid mpidr requested\n");
+			goto destroy_realm;
+		}
+		ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+				(unsigned long)PSCI_E_SUCCESS);
+		if (ret == RMI_SUCCESS) {
+			/* Re-enter REC0 complete CPU_ON */
+			ret = host_realm_rec_enter(&realm, &exit_reason,
+				&host_call_result, 0U);
+			if (ret != RMI_SUCCESS || exit_reason != RMI_EXIT_PSCI) {
+				break;
+			}
+		} else {
+			ERROR("host_rmi_psci_complete failed\n");
+			goto destroy_realm;
+		}
+	}
+	if (exit_reason != RMI_EXIT_HOST_CALL || host_call_result != TEST_RESULT_SUCCESS) {
+		ERROR("Realm failed\n");
+		goto destroy_realm;
+	}
+
+	/* Turn on all CPUs */
+	for_each_cpu(cpu_node) {
+		if (i == (MAX_REC_COUNT - 1U)) {
+			break;
+		}
+		other_mpidr = tftf_get_mpidr_from_node(cpu_node);
+		if (other_mpidr == my_mpidr) {
+			continue;
+		}
+
+		/* Power on the other CPU */
+		ret = tftf_try_cpu_on(other_mpidr, (uintptr_t)cpu_on_handler, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("TFTF CPU ON failed\n");
+			goto destroy_realm;
+		}
+		i++;
+	}
+
+	while (true) {
+		/* Re-enter REC0 complete PSCI_AFFINITY_INFO */
+		ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+		if (ret != RMI_SUCCESS) {
+			ERROR("Rec0 re-enter failed\n");
+			goto destroy_realm;
+		}
+		if (run->exit.gprs[0] != SMC_PSCI_AFFINITY_INFO_AARCH64) {
+			break;
+		}
+		rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+		if (rec_num >= MAX_REC_COUNT) {
+			ERROR("Invalid mpidr requested\n");
+			goto destroy_realm;
+		}
+		ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+				(unsigned long)PSCI_E_SUCCESS);
+
+		if (ret != RMI_SUCCESS) {
+			ERROR("host_rmi_psci_complete failed\n");
+			goto destroy_realm;
+		}
+	}
+
+	if (ret == RMI_SUCCESS && exit_reason == RMI_EXIT_HOST_CALL) {
+		ret3 = host_call_result;
+	}
+destroy_realm:
+	ret2 = host_destroy_realm(&realm);
+
+	if ((ret != RMI_SUCCESS) || !ret2) {
+		ERROR("%s(): enter=%d destroy=%d\n",
+		__func__, ret, ret2);
+		return TEST_RESULT_FAIL;
+	}
+
+	return ret3;
+}
+
diff --git a/tftf/tests/tests-realm-payload.xml b/tftf/tests/tests-realm-payload.xml
index dbd9cd5..0aaa4b1 100644
--- a/tftf/tests/tests-realm-payload.xml
+++ b/tftf/tests/tests-realm-payload.xml
@@ -12,6 +12,8 @@
 	  function="host_test_realm_create_enter" />
 	  <testcase name="Multiple Realm EL1 creation and execution test"
 	  function="host_test_multiple_realm_create_enter" />
+	  <testcase name="Realm payload multi rec multiple cpu"
+	  function="host_realm_multi_rec_multiple_cpu" />
 	  <testcase name="Realm payload multi rec single cpu"
 	  function="host_realm_multi_rec_single_cpu" />
 	  <testcase name="Realm payload multi rec psci denied"