feat(ls64): add a test for 64-byte load/store instructions
This patch adds a test to verify the 64-byte load and store
instructions introduced by FEAT_LS64.
The test primarily executes instructions:
1. LD64B
2. ST64B
and ensures that NS-EL2 has no dependency on EL3 while
running them.
Signed-off-by: Jayanth Dodderi Chidanand <jayanthdodderi.chidanand@arm.com>
Signed-off-by: Juan Pablo Conde <juanpablo.conde@arm.com>
Change-Id: I7a4ca0ee4a2c18bf0de030c72e35eb218bc6364c
diff --git a/tftf/tests/extensions/ls64/test_ls64.c b/tftf/tests/extensions/ls64/test_ls64.c
new file mode 100644
index 0000000..b7074ab
--- /dev/null
+++ b/tftf/tests/extensions/ls64/test_ls64.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "./test_ls64.h"
+#include <test_helpers.h>
+
+/*
+ * @brief Test LS64 feature support when the extension is enabled.
+ *
+ * Execute the LS64 instructions:
+ * LD64B - single-copy atomic 64-byte load.
+ * ST64B - single-copy atomic 64-byte store without return.
+ *
+ * These instructions should not be trapped to EL3 when EL2 accesses them.
+ *
+ * @return test_result_t
+ */
+test_result_t test_ls64_instructions(void)
+{
+#if PLAT_fvp
+#ifdef __aarch64__
+
+	/* Make sure FEAT_LS64 is supported. */
+	SKIP_TEST_IF_LS64_NOT_SUPPORTED();
+
+	uint64_t ls64_input_buffer[LS64_ARRAYSIZE] = {1, 2, 3, 4, 5, 6, 7, 8};
+	uint64_t ls64_output_buffer[LS64_ARRAYSIZE] = {0};
+	/*
+	 * Address where the data will be written to/read from with instructions
+	 * st64b and ld64b respectively.
+	 * Can only be in range (0x1d000000 - 0x1d00ffff) and be 64-byte aligned.
+	 */
+	uint64_t *store_address = (uint64_t *)LS64_ATOMIC_DEVICE_BASE;
+
+	/*
+	 * FEAT_LS64 : Execute LD64B and ST64B Instructions.
+	 * This test copies data from input buffer, an array of 8-64bit
+	 * unsigned integers to an output buffer via LD64B and ST64B
+	 * atomic operation instructions.
+	 *
+	 * NOTE: As we cannot pre-write into LS64_ATOMIC_DEVICE_BASE memory
+	 * via other instructions, we first load the data from a normal
+	 * input buffer into the consecutive registers and then copy them in one
+	 * atomic operation via st64b to Device memory(LS64_ATOMIC_DEVICE_BASE).
+	 * Further we load the data from the same device memory into a normal
+	 * output buffer through general registers and verify the buffers to
+	 * ensure instructions copied the data as per the architecture.
+	 */
+
+	ls64_store(ls64_input_buffer, store_address);
+	ls64_load(store_address, ls64_output_buffer);
+
+	for (uint8_t i = 0U; i < LS64_ARRAYSIZE; i++) {
+		VERBOSE("Input Buffer[%u]=%llu\n", i, ls64_input_buffer[i]);
+		VERBOSE("Output Buffer[%u]=%llu\n", i, ls64_output_buffer[i]);
+
+		if (ls64_input_buffer[i] != ls64_output_buffer[i]) {
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	return TEST_RESULT_SUCCESS;
+#else
+	/* Skip test if AArch32 */
+	SKIP_TEST_IF_AARCH32();
+#endif /* __aarch64__ */
+#else
+	tftf_testcase_printf("Test supported only on FVP\n");
+	return TEST_RESULT_SKIPPED;
+#endif /* PLAT_fvp */
+
+}