test(realm): add test for multi rec planes
Test exercises SMC_PSCI_CPU_ON from aux plane.
Request is first routed to P0 and then to Host.
Host enters P0 and then P1 on all CPUs.
Change-Id: I7e34a0070ffa7305b97a0d93de62b64042771a18
Signed-off-by: Shruti Gupta <shruti.gupta@arm.com>
diff --git a/realm/include/realm_tests.h b/realm/include/realm_tests.h
index 6c1e6c31..6f457e5 100644
--- a/realm/include/realm_tests.h
+++ b/realm/include/realm_tests.h
@@ -25,6 +25,7 @@
bool test_realm_sve_undef_abort(void);
bool test_realm_multiple_rec_psci_denied_cmd(void);
bool test_realm_multiple_rec_multiple_cpu_cmd(void);
+bool test_realm_multiple_plane_multiple_rec_multiple_cpu_cmd(void);
bool test_realm_sme_read_id_registers(void);
bool test_realm_sme_undef_abort(void);
bool test_realm_sctlr2_ease(void);
diff --git a/realm/realm_multiple_rec.c b/realm/realm_multiple_rec.c
index c584cd4..95bb6f1 100644
--- a/realm/realm_multiple_rec.c
+++ b/realm/realm_multiple_rec.c
@@ -15,18 +15,26 @@
#include <host_shared_data.h>
#include <psci.h>
#include "realm_def.h"
+#include <realm_helpers.h>
+#include <realm_psi.h>
#include <realm_rsi.h>
#include <realm_tests.h>
#include <realm_psci.h>
#include <tftf_lib.h>
-#define CXT_ID_MAGIC 0x100
+#define CXT_ID_MAGIC 0x100
+#define P1_CXT_ID_MAGIC 0x200
+
static uint64_t is_secondary_cpu_booted;
static spinlock_t lock;
+static rsi_plane_run run[MAX_REC_COUNT] __aligned(PAGE_SIZE);
+static u_register_t base, plane_index, perm_index;
-static void rec1_handler(u_register_t cxt_id)
+static void plane0_recn_handler(u_register_t cxt_id)
{
- realm_printf("running on CPU = 0x%lx cxt_id= 0x%lx\n",
+ uint64_t rec = 0U;
+
+ realm_printf("running on Rec= 0x%lx cxt_id= 0x%lx\n",
read_mpidr_el1() & MPID_MASK, cxt_id);
if (cxt_id < CXT_ID_MAGIC || cxt_id > CXT_ID_MAGIC + MAX_REC_COUNT) {
realm_printf("Wrong cxt_id\n");
@@ -34,6 +42,35 @@
}
spin_lock(&lock);
is_secondary_cpu_booted++;
+ rec = read_mpidr_el1() & MPID_MASK;
+ spin_unlock(&lock);
+
+ /* enter plane */
+ u_register_t flags = 0U;
+
+ /* Use Base adr, plane_index, perm_index programmed by P0 rec0 */
+ run[rec].enter.pc = base;
+	realm_printf("Entering plane %lu, ep=0x%lx rec=0x%lx\n", plane_index, base, rec);
+ realm_plane_enter(plane_index, perm_index, base, flags, &run[rec]);
+
+	if (run[rec].exit.gprs[0] != SMC_PSCI_CPU_OFF) {
+		realm_printf("Plane N did not request CPU OFF\n");
+		rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
+	}
+ realm_cpu_off();
+}
+
+static void recn_handler(u_register_t cxt_id)
+{
+ realm_printf("running on Rec= 0x%lx cxt_id= 0x%lx\n",
+ read_mpidr_el1() & MPID_MASK, cxt_id);
+
+ if (cxt_id < P1_CXT_ID_MAGIC || cxt_id > P1_CXT_ID_MAGIC + MAX_REC_COUNT) {
+ realm_printf("Wrong cxt_id\n");
+ rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
+ }
+ spin_lock(&lock);
+ is_secondary_cpu_booted++;
spin_unlock(&lock);
realm_cpu_off();
}
@@ -48,7 +85,7 @@
u_register_t ret;
is_secondary_cpu_booted = 0U;
- ret = realm_cpu_on(1U, (uintptr_t)rec1_handler, 0x100);
+ ret = realm_cpu_on(1U, (uintptr_t)recn_handler, 0x100);
if (ret != PSCI_E_DENIED) {
return false;
}
@@ -71,12 +108,106 @@
return true;
}
+/*
+ * All Planes enter this test function.
+ * P0 Rec0 Enters Plane N
+ * Plane N rec 0 requests CPU ON for all other rec
+ * P0 Rec0 requests CPU ON to host
+ * Host enters P0 RecN from different CPU
+ * P0 RecN enters PlaneN RecN
+ * Rec N requests CPU OFF, exits to P0
+ * P0 requests CPU OFF to host.
+ * P0 verifies all other CPU are off.
+ */
+bool test_realm_multiple_plane_multiple_rec_multiple_cpu_cmd(void)
+{
+ unsigned int i = 1U, rec_count;
+ u_register_t ret;
+ bool ret1;
+
+ realm_printf("Realm: running on Rec= 0x%lx\n", read_mpidr_el1() & MPID_MASK);
+ rec_count = realm_shared_data_get_my_host_val(HOST_ARG3_INDEX);
+
+ /* Check CPU_ON is supported */
+ ret = realm_psci_features(SMC_PSCI_CPU_ON);
+ if (ret != PSCI_E_SUCCESS) {
+ realm_printf("SMC_PSCI_CPU_ON not supported\n");
+ return false;
+ }
+
+ if (realm_is_plane0()) {
+ /* Plane 0 all rec */
+ u_register_t flags = 0U;
+
+ plane_index = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+ base = realm_shared_data_get_my_host_val(HOST_ARG2_INDEX);
+ perm_index = plane_index + 1U;
+
+ plane_common_init(plane_index, perm_index, base, &run[0U]);
+
+ ret1 = realm_plane_enter(plane_index, perm_index, base, flags, &run[0U]);
+ while (ret1 && run->exit.gprs[0] == SMC_PSCI_CPU_ON_AARCH64) {
+ realm_printf("Plane N requested CPU on Rec=0x%lx\n", run[0].exit.gprs[1]);
+
+			/* Pass context to RecN - CXT + rec idx */
+ run[0].enter.gprs[0] = realm_cpu_on(run[0].exit.gprs[1],
+ (uintptr_t)plane0_recn_handler,
+ CXT_ID_MAGIC + run[0].exit.gprs[1]);
+
+			/* Re-enter Plane N to complete CPU_ON */
+ ret1 = realm_plane_enter(plane_index, perm_index, base, flags, &run[0U]);
+ if (!ret1) {
+ realm_printf("PlaneN CPU on complete failed\n");
+ rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
+ }
+ }
+
+ /* wait for all CPUs to come up */
+ while (is_secondary_cpu_booted != rec_count - 1U) {
+ waitms(200);
+ }
+
+ /* wait for all CPUs to turn off */
+ while (i < rec_count) {
+ ret = realm_psci_affinity_info(i, MPIDR_AFFLVL0);
+ if (ret != PSCI_STATE_OFF) {
+ /* wait and query again */
+				realm_printf(" CPU %u is not off\n", i);
+ waitms(200);
+ continue;
+ }
+ i++;
+ }
+ realm_printf("All CPU are off\n");
+ return true;
+ } else {
+ /* Plane 1 Rec 0 */
+ for (unsigned int j = 1U; j < rec_count; j++) {
+ realm_printf("CPU ON Rec=%u\n", j);
+ ret = realm_cpu_on(j, (uintptr_t)recn_handler, P1_CXT_ID_MAGIC + j);
+ if (ret != PSCI_E_SUCCESS) {
+				realm_printf("SMC_PSCI_CPU_ON failed %u.\n", j);
+ return false;
+ }
+ }
+ /* Exit to Host to allow host to run all CPUs */
+ rsi_exit_to_host(HOST_CALL_EXIT_SUCCESS_CMD);
+
+ /* wait for all CPUs to come up */
+ while (is_secondary_cpu_booted != rec_count - 1U) {
+ waitms(200);
+ }
+ return true;
+ }
+ return true;
+}
+
bool test_realm_multiple_rec_multiple_cpu_cmd(void)
{
unsigned int i = 1U, rec_count;
u_register_t ret;
- realm_printf("Realm: running on CPU = 0x%lx\n", read_mpidr_el1() & MPID_MASK);
+ realm_printf("Realm: running on Rec= 0x%lx\n", read_mpidr_el1() & MPID_MASK);
rec_count = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
/* Check CPU_ON is supported */
@@ -87,7 +218,7 @@
}
for (unsigned int j = 1U; j < rec_count; j++) {
- ret = realm_cpu_on(j, (uintptr_t)rec1_handler, CXT_ID_MAGIC + j);
+ ret = realm_cpu_on(j, (uintptr_t)recn_handler, P1_CXT_ID_MAGIC + j);
if (ret != PSCI_E_SUCCESS) {
realm_printf("SMC_PSCI_CPU_ON failed %d.\n", j);
return false;
diff --git a/realm/realm_payload_main.c b/realm/realm_payload_main.c
index be555fa..ad98841 100644
--- a/realm/realm_payload_main.c
+++ b/realm/realm_payload_main.c
@@ -418,6 +418,9 @@
case REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD:
test_succeed = test_realm_multiple_rec_multiple_cpu_cmd();
break;
+ case REALM_PLANES_MULTIPLE_REC_MULTIPLE_CPU_CMD:
+ test_succeed = test_realm_multiple_plane_multiple_rec_multiple_cpu_cmd();
+ break;
case REALM_FEAT_DOUBLEFAULT2_TEST:
test_realm_feat_doublefault2();
test_succeed = true;
diff --git a/realm/realm_plane.c b/realm/realm_plane.c
index c7225ed..3b79aff 100644
--- a/realm/realm_plane.c
+++ b/realm/realm_plane.c
@@ -11,6 +11,8 @@
#include <debug.h>
#include <host_realm_helper.h>
+#include <psci.h>
+#include <realm_psci.h>
#include <realm_psi.h>
#include <realm_rsi.h>
#include <sync.h>
@@ -102,10 +104,30 @@
/* Disallow SMC from Plane N */
if (ec == EC_AARCH64_SMC) {
- /* TODO Support PSCI in future */
+ u_register_t smc_id = run->exit.gprs[0];
+
restore_plane_context(run);
- run->enter.gprs[0] = RSI_ERROR_STATE;
- return PSI_RETURN_TO_PN;
+ switch (smc_id) {
+ case SMC_PSCI_CPU_ON_AARCH64:
+ assert(run->exit.gprs[1] < MAX_REC_COUNT);
+ assert(run->exit.gprs[1] != 0U);
+ /* Let P0 handle CPU ON */
+ return PSI_RETURN_TO_P0;
+ case SMC_PSCI_CPU_OFF:
+ realm_cpu_off();
+ /* Does not return. */
+ return PSI_RETURN_TO_PN;
+ case SMC_PSCI_FEATURES:
+ run->enter.gprs[0] = realm_psci_features(run->exit.gprs[1U]);
+ return PSI_RETURN_TO_PN;
+ case SMC_PSCI_AFFINITY_INFO:
+ run->enter.gprs[0] = realm_psci_affinity_info(run->exit.gprs[1U],
+ run->exit.gprs[2U]);
+ return PSI_RETURN_TO_PN;
+ default:
+ run->enter.gprs[0] = RSI_ERROR_STATE;
+ return PSI_RETURN_TO_PN;
+ }
}
/* Handle PSI HVC call from Plane N */