Merge changes from topic "af/add_branch_protection_makefiles"
* changes:
TFTF: Add ARMv8.5 BTI support in makefiles
TFTF: Add ARMv8.5 BTI support in xlat_tables_v2 library
TFTF: Add ARMv8.5 BTI support in assembler files
TFTF: Add ARMv8.5 BTI-related definitions
diff --git a/docs/getting_started/docs-build.rst b/docs/getting_started/docs-build.rst
index 6a6992b..1b8c1eb 100644
--- a/docs/getting_started/docs-build.rst
+++ b/docs/getting_started/docs-build.rst
@@ -64,7 +64,7 @@
::
- docs/build/html/
+ docs/build/html
We also support building documentation in other formats. From the ``docs``
directory of the project, run the following command to see the supported
@@ -76,9 +76,36 @@
make help
+Building rendered documentation from a container
+------------------------------------------------
+
+There may be cases where you cannot install or upgrade the required
+dependencies to generate the documents. In this case, one way to create the
+documentation is through a Docker container. The first step is to check if
+`docker`_ is installed on your host; otherwise, see the main Docker page for
+installation instructions. Once installed, run the following command from the
+project root directory:
+
+.. code:: shell
+
+ docker run --rm -v $PWD:/TF sphinxdoc/sphinx \
+ bash -c 'cd /TF && \
+ pip3 install plantuml -r ./docs/requirements.txt && make doc'
+
+The above command fetches the ``sphinxdoc/sphinx`` container from `docker
+hub`_, launches the container, installs the documentation requirements and
+finally builds the documentation. Once done, the container exits and the
+output from the build process will be placed in:
+
+::
+
+ docs/build/html
+
--------------
*Copyright (c) 2020, Arm Limited. All rights reserved.*
.. _Sphinx: http://www.sphinx-doc.org/en/master/
.. _pip homepage: https://pip.pypa.io/en/stable/
+.. _docker: https://www.docker.com/
+.. _docker hub: https://hub.docker.com/repository/docker/sphinxdoc/sphinx
diff --git a/include/common/fwu_nvm.h b/include/common/fwu_nvm.h
index 3865d4b..d6557ae 100644
--- a/include/common/fwu_nvm.h
+++ b/include/common/fwu_nvm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,32 +11,10 @@
#include <platform_def.h>
#define FIP_IMAGE_UPDATE_DONE_FLAG (0xDEADBEEF)
-/*
- * This is the temporary ddr address for loading backup fip.bin
- * image from NVM which is used for replacing original fip.bin
- * This address is chosen such that the NS_BL2U can be expanded
- * in future and also considering the large size of fip.bin.
- */
-#define FIP_IMAGE_TMP_DDR_ADDRESS (DRAM_BASE + 0x100000)
+
#define FWU_TFTF_TESTCASE_BUFFER_OFFSET \
(TFTF_NVM_OFFSET + TFTF_STATE_OFFSET(testcase_buffer))
-/*
- * This offset is used to corrupt data in fip.bin
- * The offset is from the base where fip.bin is
- * located in NVM. This particular value is chosen
- * to make sure the corruption is done beyond fip header.
- */
-#define FIP_CORRUPT_OFFSET (0x400)
-
-/*
- * This is the base address for backup fip.bin image in NVM
- * which is used for replacing original fip.bin
- * This address is chosen such that it can stay with all
- * the other images in the NVM.
- */
-#define FIP_BKP_ADDRESS (FLASH_BASE + 0x1000000)
-
/* Writes the buffer to the flash at offset with length equal to
* size
* Returns: STATUS_FAIL, STATUS_SUCCESS, STATUS_OUT_OF_RESOURCES
diff --git a/include/runtime_services/ffa_helpers.h b/include/runtime_services/ffa_helpers.h
index d4ef803..0692aa9 100644
--- a/include/runtime_services/ffa_helpers.h
+++ b/include/runtime_services/ffa_helpers.h
@@ -28,11 +28,25 @@
#include <stdint.h>
+struct mailbox_buffers {
+ const void *recv;
+ void *send;
+};
+
+struct ffa_partition_info {
+ /** The ID of the VM the information is about */
+ ffa_vm_id_t id;
+ /** The number of execution contexts implemented by the partition */
+ uint16_t exec_context;
+ /** The Partition's properties, e.g. supported messaging methods */
+ uint32_t properties;
+};
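+
+/*
+ * Note: FFA_PARTITION_INFO_GET returns an array of these descriptors in the
+ * caller's RX buffer; once parsed, the buffer is handed back to the SPM with
+ * FFA_RX_RELEASE (see ffa_partition_info_helper() in cactus_ffa_tests.c).
+ */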
+
/*
* TODO: In the future this file should be placed in a common folder, and not
* under tftf. The functions in this file are also used by SPs for SPM tests.
*/
-
+bool check_spmc_execution_level(void);
smc_ret_values ffa_msg_send_direct_req(uint32_t source_id, uint32_t dest_id, uint32_t message);
smc_ret_values ffa_msg_send_direct_req64(uint32_t source_id, uint32_t dest_id, uint64_t message);
smc_ret_values ffa_run(uint32_t dest_id, uint32_t vcpu_id);
@@ -42,6 +56,9 @@
smc_ret_values ffa_msg_send_direct_resp(ffa_vm_id_t source_id,
ffa_vm_id_t dest_id, uint32_t message);
smc_ret_values ffa_error(int32_t error_code);
+smc_ret_values ffa_features(uint32_t feature);
+smc_ret_values ffa_partition_info_get(const uint32_t uuid[4]);
+smc_ret_values ffa_rx_release(void);
#endif /* __ASSEMBLY__ */
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
index ab37be9..6cabea2 100644
--- a/lib/aarch32/misc_helpers.S
+++ b/lib/aarch32/misc_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -16,24 +16,29 @@
* void zeromem(void *mem, unsigned int length);
*
* Initialise a memory region to 0.
- * The memory address and length must be 4-byte aligned.
+ * The memory address must be 4-byte aligned.
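+ * The length is expected to be non-zero; a zero length is not handled and
+ * would underflow the trailing byte loop.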
* -----------------------------------------------------------------------
*/
func zeromem
#if ENABLE_ASSERTIONS
tst r0, #0x3
ASM_ASSERT(eq)
- tst r1, #0x3
- ASM_ASSERT(eq)
#endif
- add r2, r0, r1
- mov r1, #0
-z_loop:
- cmp r2, r0
- beq z_end
- str r1, [r0], #4
- b z_loop
-z_end:
+ mov r2, #0
+/* zero 4 bytes at a time */
+z_loop4:
+ cmp r1, #4
+ blo z_loop1
+ str r2, [r0], #4
+ subs r1, r1, #4
+ bne z_loop4
+ bx lr
+
+/* zero byte per byte */
+z_loop1:
+ strb r2, [r0], #1
+ subs r1, r1, #1
+ bne z_loop1
bx lr
endfunc zeromem
@@ -54,20 +59,19 @@
/* copy 4 bytes at a time */
m_loop4:
cmp r2, #4
- blt m_loop1
+ blo m_loop1
ldr r3, [r1], #4
str r3, [r0], #4
- sub r2, r2, #4
- b m_loop4
+ subs r2, r2, #4
+ bne m_loop4
+ bx lr
+
/* copy byte per byte */
m_loop1:
- cmp r2,#0
- beq m_end
ldrb r3, [r1], #1
strb r3, [r0], #1
subs r2, r2, #1
bne m_loop1
-m_end:
bx lr
endfunc memcpy4
diff --git a/plat/arm/fvp/include/platform_def.h b/plat/arm/fvp/include/platform_def.h
index d5dc818..3abeb03 100644
--- a/plat/arm/fvp/include/platform_def.h
+++ b/plat/arm/fvp/include/platform_def.h
@@ -60,6 +60,31 @@
#define PLAT_ARM_FWU_FIP_SIZE (0x100000)
/*******************************************************************************
+ * This is the temporary DDR address for loading backup fip.bin
+ * image from NVM which is used for replacing original fip.bin
+ * This address is chosen such that the NS_BL2U can be expanded
+ * in future and also considering the large size of fip.bin.
+ ******************************************************************************/
+#define FIP_IMAGE_TMP_DDR_ADDRESS (DRAM_BASE + 0x100000)
+
+/*******************************************************************************
+ * This offset is used to corrupt data in fip.bin
+ * The offset is from the base where fip.bin is
+ * located in NVM. This particular value is chosen
+ * to make sure the corruption is done beyond fip header.
+ ******************************************************************************/
+#define FIP_CORRUPT_OFFSET (0x400)
+
+/*******************************************************************************
+ * This is the base address for backup fip.bin image in NVM
+ * which is used for replacing original fip.bin
+ * This address is chosen such that it can stay with all
+ * the other images in the NVM.
+ ******************************************************************************/
+#define FIP_BKP_ADDRESS (FLASH_BASE + 0x1000000)
+
+/*******************************************************************************
* Base address and size for non-trusted SRAM.
******************************************************************************/
#define NSRAM_BASE (0x2e000000)
diff --git a/plat/arm/juno/include/platform_def.h b/plat/arm/juno/include/platform_def.h
index 03a7124..0f9bb77 100644
--- a/plat/arm/juno/include/platform_def.h
+++ b/plat/arm/juno/include/platform_def.h
@@ -71,6 +71,31 @@
#define PLAT_ARM_FWU_FIP_SIZE (0x100000)
/*******************************************************************************
+ * This is the temporary DDR address for loading backup fip.bin
+ * image from NVM which is used for replacing original fip.bin
+ * This address is chosen such that the NS_BL2U can be expanded
+ * in future and also considering the large size of fip.bin.
+ ******************************************************************************/
+#define FIP_IMAGE_TMP_DDR_ADDRESS (DRAM_BASE + 0x100000)
+
+/*******************************************************************************
+ * This offset is used to corrupt data in fip.bin
+ * The offset is from the base where fip.bin is
+ * located in NVM. This particular value is chosen
+ * to make sure the corruption is done beyond fip header.
+ ******************************************************************************/
+#define FIP_CORRUPT_OFFSET (0x400)
+
+/*******************************************************************************
+ * This is the base address for backup fip.bin image in NVM
+ * which is used for replacing original fip.bin
+ * This address is chosen such that it can stay with all
+ * the other images in the NVM.
+ ******************************************************************************/
+#define FIP_BKP_ADDRESS (FLASH_BASE + 0x1000000)
+
+/*******************************************************************************
* Base address and size for non-trusted SRAM.
******************************************************************************/
#define NSRAM_BASE (0x2e000000)
diff --git a/spm/cactus/cactus_def.h b/spm/cactus/cactus_def.h
index a0adb23..0d3df2e 100644
--- a/spm/cactus/cactus_def.h
+++ b/spm/cactus/cactus_def.h
@@ -28,4 +28,18 @@
#define CACTUS_TX_BASE CACTUS_RX_BASE + PAGE_SIZE
#define CACTUS_RX_TX_SIZE PAGE_SIZE * 2
+/*
+ * RX/TX buffer helpers.
+ */
+#define get_sp_rx_start(sp_id) (CACTUS_RX_BASE + ((sp_id - 1) * CACTUS_RX_TX_SIZE))
+#define get_sp_rx_end(sp_id) (CACTUS_RX_BASE + ((sp_id - 1) * CACTUS_RX_TX_SIZE) + PAGE_SIZE)
+#define get_sp_tx_start(sp_id) (CACTUS_TX_BASE + ((sp_id - 1) * CACTUS_RX_TX_SIZE))
+#define get_sp_tx_end(sp_id) (CACTUS_TX_BASE + ((sp_id - 1) * CACTUS_RX_TX_SIZE) + PAGE_SIZE)
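+/*
+ * For illustration: assuming a 4KB PAGE_SIZE, SP 1's RX buffer occupies
+ * [CACTUS_RX_BASE, CACTUS_RX_BASE + 4KB) and its TX buffer the following
+ * 4KB page, while SP 2's RX/TX pair starts CACTUS_RX_TX_SIZE (8KB) above
+ * SP 1's.
+ */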
+
+/*
+ * UUID of secure partition as defined in the respective manifests.
+ */
+#define PRIMARY_UUID {0xb4b5671e, 0x4a904fe1, 0xb81ffb13, 0xdae1dacb}
+#define SECONDARY_UUID {0xd1582309, 0xf02347b9, 0x827c4464, 0xf5578fc8}
+
#endif /* CACTUS_DEF_H */
diff --git a/spm/cactus/cactus_ffa_tests.c b/spm/cactus/cactus_ffa_tests.c
index 411cc9f..25c20b0 100644
--- a/spm/cactus/cactus_ffa_tests.c
+++ b/spm/cactus/cactus_ffa_tests.c
@@ -4,8 +4,9 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
-#include <errno.h>
#include <debug.h>
+#include <errno.h>
+#include <cactus_def.h>
#include <ffa_helpers.h>
#include <sp_helpers.h>
@@ -13,20 +14,146 @@
#define FFA_MAJOR 1U
#define FFA_MINOR 0U
-void ffa_tests(void)
-{
- const char *test_ffa = "FFA Interfaces";
- const char *test_ffa_version = "FFA Version interface";
+static const uint32_t primary_uuid[4] = PRIMARY_UUID;
+static const uint32_t secondary_uuid[4] = SECONDARY_UUID;
+static const uint32_t null_uuid[4] = {0};
- announce_test_section_start(test_ffa);
+struct feature_test {
+ const char *test_name;
+ unsigned int feature;
+ unsigned int expected_ret;
+};
+
+static const struct feature_test test_target[] = {
+ {"FFA_ERROR_32 check", FFA_ERROR, FFA_SUCCESS_SMC32},
+ {"FFA_SUCCESS_32 check", FFA_SUCCESS_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_INTERRUPT_32 check", FFA_INTERRUPT, FFA_SUCCESS_SMC32},
+ {"FFA_VERSION_32 check", FFA_VERSION, FFA_SUCCESS_SMC32},
+ {"FFA_FEATURES_32 check", FFA_FEATURES, FFA_SUCCESS_SMC32},
+ {"FFA_RX_RELEASE_32 check", FFA_RX_RELEASE, FFA_SUCCESS_SMC32},
+ {"FFA_RXTX_MAP_32 check", FFA_RXTX_MAP_SMC32, FFA_ERROR},
+ {"FFA_RXTX_MAP_64 check", FFA_RXTX_MAP_SMC64, FFA_SUCCESS_SMC32},
+ {"FFA_RXTX_UNMAP_32 check", FFA_RXTX_UNMAP, FFA_ERROR},
+ {"FFA_PARTITION_INFO_GET_32 check", FFA_PARTITION_INFO_GET, FFA_SUCCESS_SMC32},
+ {"FFA_ID_GET_32 check", FFA_ID_GET, FFA_SUCCESS_SMC32},
+ {"FFA_MSG_POLL_32 check", FFA_MSG_POLL, FFA_SUCCESS_SMC32},
+ {"FFA_MSG_WAIT_32 check", FFA_MSG_WAIT, FFA_SUCCESS_SMC32},
+ {"FFA_YIELD_32 check", FFA_MSG_YIELD, FFA_SUCCESS_SMC32},
+ {"FFA_RUN_32 check", FFA_MSG_RUN, FFA_SUCCESS_SMC32},
+ {"FFA_MSG_SEND_32 check", FFA_MSG_SEND, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_DONATE_32 check", FFA_MEM_DONATE_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_LEND_32 check", FFA_MEM_LEND_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_SHARE_32 check", FFA_MEM_SHARE_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RETRIEVE_REQ_32 check", FFA_MEM_RETRIEVE_REQ_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RETRIEVE_RESP_32 check", FFA_MEM_RETRIEVE_RESP, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RELINQUISH_32 check", FFA_MEM_RELINQUISH, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RECLAIM_32 check", FFA_MEM_RECLAIM, FFA_SUCCESS_SMC32},
+ {"Check non-existent command", 0xFFFF, FFA_ERROR}
+};
+
+/*
+ * Test FFA_FEATURES interface.
+ */
+static void ffa_features_test(void)
+{
+ const char *test_features = "FFA Features interface";
+ smc_ret_values ffa_ret;
+ unsigned int i, test_target_size =
+ sizeof(test_target) / sizeof(struct feature_test);
+
+ announce_test_section_start(test_features);
+
+ for (i = 0U; i < test_target_size; i++) {
+ announce_test_start(test_target[i].test_name);
+
+ ffa_ret = ffa_features(test_target[i].feature);
+ expect(ffa_ret.ret0, test_target[i].expected_ret);
+ if (test_target[i].expected_ret == FFA_ERROR) {
+ expect(ffa_ret.ret2, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ announce_test_end(test_target[i].test_name);
+ }
+
+ announce_test_section_end(test_features);
+}
+
+static void ffa_partition_info_helper(struct mailbox_buffers *mb, const uint32_t uuid[4],
+ const struct ffa_partition_info *expected,
+ const uint16_t expected_size)
+{
+ smc_ret_values ret = ffa_partition_info_get(uuid);
+ unsigned int i;
+ expect(ret.ret0, FFA_SUCCESS_SMC32);
+
+ struct ffa_partition_info *info = (struct ffa_partition_info *)(mb->recv);
+ for (i = 0U; i < expected_size; i++) {
+ expect(info[i].id, expected[i].id);
+ expect(info[i].exec_context, expected[i].exec_context);
+ expect(info[i].properties, expected[i].properties);
+ }
+
+ ret = ffa_rx_release();
+ expect(ret.ret0, FFA_SUCCESS_SMC32);
+}
+
+static void ffa_partition_info_wrong_test(void)
+{
+ const char *test_wrong_uuid = "Request wrong UUID";
+ uint32_t uuid[4] = {1};
+
+ announce_test_start(test_wrong_uuid);
+
+ smc_ret_values ret = ffa_partition_info_get(uuid);
+ expect(ret.ret0, FFA_ERROR);
+ expect(ret.ret2, FFA_ERROR_INVALID_PARAMETER);
+
+ announce_test_end(test_wrong_uuid);
+}
+
+static void ffa_partition_info_get_test(struct mailbox_buffers *mb)
+{
+ const char *test_partition_info = "FFA Partition info interface";
+ const char *test_primary = "Get primary partition info";
+ const char *test_secondary = "Get secondary partition info";
+ const char *test_all = "Get all partitions info";
+
+ const struct ffa_partition_info expected_info[] = {
+ {.id = SPM_VM_ID_FIRST, .exec_context = 8, .properties = 0}, /* Primary partition info */
+ {.id = 2, .exec_context = 2, .properties = 0} /* Secondary partition info */
+ };
+
+ announce_test_section_start(test_partition_info);
+
+ announce_test_start(test_secondary);
+ ffa_partition_info_helper(mb, secondary_uuid, &expected_info[1], 1);
+ announce_test_end(test_secondary);
+
+ announce_test_start(test_primary);
+ ffa_partition_info_helper(mb, primary_uuid, &expected_info[0], 1);
+ announce_test_end(test_primary);
+
+ announce_test_start(test_all);
+ ffa_partition_info_helper(mb, null_uuid, expected_info, 2);
+ announce_test_end(test_all);
+
+ ffa_partition_info_wrong_test();
+
+ announce_test_section_end(test_partition_info);
+}
+
+void ffa_version_test(void)
+{
+ const char *test_ffa_version = "FFA Version interface";
announce_test_start(test_ffa_version);
smc_ret_values ret = ffa_version(MAKE_FFA_VERSION(FFA_MAJOR, FFA_MINOR));
- uint32_t spm_version = (uint32_t)(0xFFFFFFFF & ret.ret0);
+ uint32_t spm_version = (uint32_t)ret.ret0;
- bool ffa_version_compatible = ((spm_version >> FFA_VERSION_MAJOR_SHIFT) == FFA_MAJOR &&
- (spm_version & FFA_VERSION_MINOR_MASK) >= FFA_MINOR);
+ bool ffa_version_compatible =
+ ((spm_version >> FFA_VERSION_MAJOR_SHIFT) == FFA_MAJOR &&
+ (spm_version & FFA_VERSION_MINOR_MASK) >= FFA_MINOR);
NOTICE("FFA_VERSION returned %u.%u; Compatible: %i\n",
spm_version >> FFA_VERSION_MAJOR_SHIFT,
@@ -36,6 +163,17 @@
expect((int)ffa_version_compatible, (int)true);
announce_test_end(test_ffa_version);
+}
+
+void ffa_tests(struct mailbox_buffers *mb)
+{
+ const char *test_ffa = "FFA Interfaces";
+
+ announce_test_section_start(test_ffa);
+
+ ffa_features_test();
+ ffa_version_test();
+ ffa_partition_info_get_test(mb);
announce_test_section_end(test_ffa);
}
diff --git a/spm/cactus/cactus_main.c b/spm/cactus/cactus_main.c
index 49764ec..31906d5 100644
--- a/spm/cactus/cactus_main.c
+++ b/spm/cactus/cactus_main.c
@@ -102,12 +102,12 @@
(void *)CACTUS_BSS_START, (void *)CACTUS_BSS_END);
NOTICE(" RX : %p - %p\n",
- (void *)(CACTUS_RX_BASE + ((vm_id - 1) * CACTUS_RX_TX_SIZE)),
- (void *)(CACTUS_TX_BASE + ((vm_id - 1) * CACTUS_RX_TX_SIZE)));
+ (void *)get_sp_rx_start(vm_id),
+ (void *)get_sp_rx_end(vm_id));
NOTICE(" TX : %p - %p\n",
- (void *)(CACTUS_TX_BASE + ((vm_id - 1) * CACTUS_RX_TX_SIZE)),
- (void *)(CACTUS_RX_BASE + (vm_id * CACTUS_RX_TX_SIZE)));
+ (void *)get_sp_tx_start(vm_id),
+ (void *)get_sp_tx_end(vm_id));
}
static void cactus_plat_configure_mmu(unsigned int vm_id)
@@ -129,13 +129,13 @@
CACTUS_BSS_END - CACTUS_BSS_START,
MT_RW_DATA);
- mmap_add_region((CACTUS_RX_BASE + ((vm_id - 1) * CACTUS_RX_TX_SIZE)),
- (CACTUS_RX_BASE + ((vm_id - 1) * CACTUS_RX_TX_SIZE)),
+ mmap_add_region(get_sp_rx_start(vm_id),
+ get_sp_rx_start(vm_id),
(CACTUS_RX_TX_SIZE / 2),
MT_RO_DATA);
- mmap_add_region((CACTUS_TX_BASE + ((vm_id - 1) * CACTUS_RX_TX_SIZE)),
- (CACTUS_TX_BASE + ((vm_id - 1) * CACTUS_RX_TX_SIZE)),
+ mmap_add_region(get_sp_tx_start(vm_id),
+ get_sp_tx_start(vm_id),
(CACTUS_RX_TX_SIZE / 2),
MT_RW_DATA);
@@ -147,6 +147,7 @@
{
assert(IS_IN_EL1() != 0);
+ struct mailbox_buffers mb;
/* Clear BSS */
memset((void *)CACTUS_BSS_START,
0, CACTUS_BSS_END - CACTUS_BSS_START);
@@ -159,6 +160,8 @@
}
ffa_vm_id_t ffa_id = ffa_id_ret.ret2 & 0xffff;
+ mb.send = (void *) get_sp_tx_start(ffa_id);
+ mb.recv = (void *) get_sp_rx_start(ffa_id);
/* Configure and enable Stage-1 MMU, enable D-Cache */
cactus_plat_configure_mmu(ffa_id);
@@ -190,7 +193,7 @@
cactus_print_memory_layout(ffa_id);
/* Invoking Tests */
- ffa_tests();
+ ffa_tests(&mb);
/* End up to message loop */
message_loop(ffa_id);
diff --git a/spm/cactus/cactus_tests.h b/spm/cactus/cactus_tests.h
index 23586f5..2e13a6f 100644
--- a/spm/cactus/cactus_tests.h
+++ b/spm/cactus/cactus_tests.h
@@ -7,6 +7,8 @@
#ifndef CACTUS_TESTS_H
#define CACTUS_TESTS_H
+#include <ffa_helpers.h>
+
/*
* Test functions
*/
@@ -14,7 +16,7 @@
/*
* Test to FFA interfaces.
*/
-void ffa_tests(void);
+void ffa_tests(struct mailbox_buffers *mb);
/*
* Test other things like the version number returned by SPM.
diff --git a/spm/cactus/cactus_tests_misc.c b/spm/cactus/cactus_tests_misc.c
deleted file mode 100644
index 39eb752..0000000
--- a/spm/cactus/cactus_tests_misc.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <assert.h>
-#include <debug.h>
-#include <errno.h>
-#include <sp_helpers.h>
-#include <spm_svc.h>
-#include <sprt_client.h>
-#include <sprt_svc.h>
-#include <stdint.h>
-
-#include "cactus.h"
-#include "cactus_tests.h"
-
-/*
- * Miscellaneous SPM tests.
- */
-void misc_tests(void)
-{
- int32_t ret;
-
- const char *test_sect_desc = "miscellaneous";
-
- announce_test_section_start(test_sect_desc);
-
- const char *test_version_sprt = "SPRT version check";
-
- announce_test_start(test_version_sprt);
- ret = sprt_version();
- INFO("Version = 0x%x (%u.%u)\n", ret,
- (ret >> SPRT_VERSION_MAJOR_SHIFT) & SPRT_VERSION_MAJOR_MASK,
- ret & SPRT_VERSION_MINOR_MASK);
- expect(ret, SPRT_VERSION_COMPILED);
- announce_test_end(test_version_sprt);
-
- announce_test_section_end(test_sect_desc);
-}
diff --git a/tftf/tests/plat/nvidia/tegra194/include/tegra194_ras.h b/tftf/tests/plat/nvidia/tegra194/include/tegra194_ras.h
new file mode 100644
index 0000000..9bbd261
--- /dev/null
+++ b/tftf/tests/plat/nvidia/tegra194/include/tegra194_ras.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA194_RAS_H
+#define TEGRA194_RAS_H
+
+/* Error record information */
+struct err_record_info {
+ /* Opaque group-specific data */
+ void *aux_data;
+ struct {
+ /*
+		 * For error records accessed via system register, index of
+		 * the error record.
+ */
+ unsigned int idx_start;
+ unsigned int num_idx;
+ } sysreg;
+};
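+
+/*
+ * A group of contiguous error records sharing the same aux_data is described
+ * with one err_record_info entry; see the ADD_ONE_ERR_GROUP() helper in the
+ * RAS test sources below.
+ */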
+
+struct err_record_mapping {
+ struct err_record_info *err_records;
+ size_t num_err_records;
+};
+
+/* Implementation defined RAS error and corresponding error message */
+struct ras_error_rec {
+ const char *error_msg;
+ /* IERR(bits[15:8]) from ERR<n>STATUS */
+ uint8_t error_code;
+};
+
+/* RAS error node-specific auxiliary data */
+struct ras_aux_data {
+	/*
+	 * Pointer to a null-terminated ras_error array used to convert an
+	 * error code to a message.
+	 */
+ const struct ras_error_rec *error_records;
+	/*
+	 * Function returning the value that needs to be programmed into
+	 * ERXCTLR_EL1 to enable all specified RAS errors for the current node.
+ */
+ uint64_t (*err_ctrl)(void);
+};
+
+/* Architecturally-defined primary error code SERR, bits[7:0] from ERR<n>STATUS */
+#define ERR_STATUS_SERR(X) \
+ /* SERR, message */ \
+ X(0, "No error") \
+ X(1, "IMPLEMENTATION DEFINED error") \
+ X(2, "Data value from (non-associative) internal memory") \
+ X(3, "IMPLEMENTATION DEFINED pin") \
+ X(4, "Assertion failure") \
+ X(5, "Error detected on internal data path") \
+ X(6, "Data value from associative memory") \
+ X(7, "Address/control value from associative memory") \
+ X(8, "Data value from a TLB") \
+ X(9, "Address/control value from a TLB") \
+ X(10, "Data value from producer") \
+ X(11, "Address/control value from producer") \
+ X(12, "Data value from (non-associative) external memory") \
+ X(13, "Illegal address (software fault)") \
+ X(14, "Illegal access (software fault)") \
+ X(15, "Illegal state (software fault)") \
+ X(16, "Internal data register") \
+ X(17, "Internal control register") \
+ X(18, "Error response from slave") \
+ X(19, "External timeout") \
+ X(20, "Internal timeout") \
+ X(21, "Deferred error from slave not supported at master")
+
+/* IFU Uncorrectable RAS ERROR */
+#define IFU_UNCORR_RAS_ERROR_LIST(X)
+
+/* JSR_RET Uncorrectable RAS ERROR */
+#define JSR_RET_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(JSR_RET, 35, 0x13, "Floating Point Register File Parity Error") \
+ X(JSR_RET, 34, 0x12, "Integer Register File Parity Error") \
+ X(JSR_RET, 33, 0x11, "Garbage Bundle") \
+ X(JSR_RET, 32, 0x10, "Bundle Completion Timeout")
+
+/* JSR_MTS Uncorrectable RAS ERROR */
+#define JSR_MTS_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(JSR_MTS, 40, 0x28, "CoreSight Access Error") \
+ X(JSR_MTS, 39, 0x27, "Dual Execution Uncorrectable Error") \
+ X(JSR_MTS, 37, 0x25, "CTU MMIO Region") \
+ X(JSR_MTS, 36, 0x24, "MTS MMCRAB Region Access") \
+ X(JSR_MTS, 35, 0x23, "MTS_CARVEOUT Access from ARM SW")
+
+/* LSD_STQ Uncorrectable RAS ERROR */
+#define LSD_STQ_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(LSD_STQ, 41, 0x39, "Coherent Cache Data Store Multi-Line ECC Error") \
+ X(LSD_STQ, 40, 0x38, "Coherent Cache Data Store Uncorrectable ECC Error") \
+ X(LSD_STQ, 38, 0x36, "Coherent Cache Data Load Uncorrectable ECC Error") \
+ X(LSD_STQ, 33, 0x31, "Coherent Cache Tag Store Parity Error") \
+ X(LSD_STQ, 32, 0x30, "Coherent Cache Tag Load Parity Error")
+
+/* LSD_DCC Uncorrectable RAS ERROR */
+#define LSD_DCC_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(LSD_DCC, 41, 0x49, "BTU Copy Mini-Cache PPN Multi-Hit Error") \
+ X(LSD_DCC, 39, 0x47, "Coherent Cache Data Uncorrectable ECC Error") \
+ X(LSD_DCC, 37, 0x45, "Version Cache Byte-Enable Parity Error") \
+ X(LSD_DCC, 36, 0x44, "Version Cache Data Uncorrectable ECC Error") \
+ X(LSD_DCC, 33, 0x41, "BTU Copy Coherent Cache PPN Parity Error") \
+ X(LSD_DCC, 32, 0x40, "BTU Copy Coherent Cache VPN Parity Error")
+
+/* LSD_L1HPF Uncorrectable RAS ERROR */
+#define LSD_L1HPF_UNCORR_RAS_ERROR_LIST(X)
+
+/* L2 Uncorrectable RAS ERROR */
+#define L2_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(L2, 56, 0x68, "URT Timeout") \
+ X(L2, 55, 0x67, "L2 Protocol Violation") \
+ X(L2, 54, 0x66, "SCF to L2 Slave Error Read") \
+ X(L2, 53, 0x65, "SCF to L2 Slave Error Write") \
+ X(L2, 52, 0x64, "SCF to L2 Decode Error Read") \
+ X(L2, 51, 0x63, "SCF to L2 Decode Error Write") \
+ X(L2, 50, 0x62, "SCF to L2 Request Response Interface Parity Errors") \
+ X(L2, 49, 0x61, "SCF to L2 Advance notice interface parity errors") \
+ X(L2, 48, 0x60, "SCF to L2 Filldata Parity Errors") \
+ X(L2, 47, 0x5F, "SCF to L2 UnCorrectable ECC Data Error on interface") \
+ X(L2, 45, 0x5D, "Core 1 to L2 Parity Error") \
+ X(L2, 44, 0x5C, "Core 0 to L2 Parity Error") \
+ X(L2, 43, 0x5B, "L2 Multi-Hit") \
+ X(L2, 42, 0x5A, "L2 URT Tag Parity Error") \
+ X(L2, 41, 0x59, "L2 NTT Tag Parity Error") \
+ X(L2, 40, 0x58, "L2 MLT Tag Parity Error") \
+ X(L2, 39, 0x57, "L2 URD Data") \
+ X(L2, 38, 0x56, "L2 NTP Data") \
+ X(L2, 36, 0x54, "L2 MLC Uncorrectable Clean") \
+ X(L2, 35, 0x53, "L2 URD Uncorrectable Dirty") \
+ X(L2, 34, 0x52, "L2 MLC Uncorrectable Dirty")
+
+/* CLUSTER_CLOCKS Uncorrectable RAS ERROR */
+#define CLUSTER_CLOCKS_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(CLUSTER_CLOCKS, 32, 0xE4, "Frequency Monitor Error")
+
+/* MMU Uncorrectable RAS ERROR */
+#define MMU_UNCORR_RAS_ERROR_LIST(X)
+
+/* L3 Uncorrectable RAS ERROR */
+#define L3_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(L3, 43, 0x7B, "SNOC Interface Parity Error") \
+ X(L3, 42, 0x7A, "MCF Interface Parity Error") \
+ X(L3, 41, 0x79, "L3 Tag Parity Error") \
+ X(L3, 40, 0x78, "L3 Dir Parity Error") \
+ X(L3, 39, 0x77, "L3 Uncorrectable ECC Error") \
+ X(L3, 37, 0x75, "Multi-Hit CAM Error") \
+ X(L3, 36, 0x74, "Multi-Hit Tag Error") \
+ X(L3, 35, 0x73, "Unrecognized Command Error") \
+ X(L3, 34, 0x72, "L3 Protocol Error")
+
+/* CCPMU Uncorrectable RAS ERROR */
+#define CCPMU_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(CCPMU, 33, 0x81, "CRAB Access Error")
+
+/* SCF_IOB Uncorrectable RAS ERROR */
+#define SCF_IOB_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(SCF_IOB, 41, 0x99, "Request parity error") \
+ X(SCF_IOB, 40, 0x98, "Putdata parity error") \
+ X(SCF_IOB, 39, 0x97, "Uncorrectable ECC on Putdata") \
+ X(SCF_IOB, 38, 0x96, "CBB Interface Error") \
+ X(SCF_IOB, 37, 0x95, "MMCRAB Error") \
+ X(SCF_IOB, 36, 0x94, "IHI Interface Error") \
+ X(SCF_IOB, 35, 0x93, "CRI Error") \
+ X(SCF_IOB, 34, 0x92, "TBX Interface Error") \
+ X(SCF_IOB, 33, 0x91, "EVP Interface Error")
+
+/* SCF_SNOC Uncorrectable RAS ERROR */
+#define SCF_SNOC_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(SCF_SNOC, 42, 0xAA, "Misc Client Parity Error") \
+ X(SCF_SNOC, 41, 0xA9, "Misc Filldata Parity Error") \
+ X(SCF_SNOC, 40, 0xA8, "Uncorrectable ECC Misc Client") \
+ X(SCF_SNOC, 39, 0xA7, "DVMU Interface Parity Error") \
+ X(SCF_SNOC, 38, 0xA6, "DVMU Interface Timeout Error") \
+ X(SCF_SNOC, 37, 0xA5, "CPE Request Error") \
+ X(SCF_SNOC, 36, 0xA4, "CPE Response Error") \
+ X(SCF_SNOC, 35, 0xA3, "CPE Timeout Error") \
+ X(SCF_SNOC, 34, 0xA2, "Uncorrectable Carveout Error")
+
+/* SCF_CTU Uncorrectable RAS ERROR */
+#define SCF_CTU_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(SCF_CTU, 39, 0xB7, "Timeout error for TRC_DMA request") \
+ X(SCF_CTU, 38, 0xB6, "Timeout error for CTU Snp") \
+ X(SCF_CTU, 37, 0xB5, "Parity error in CTU TAG RAM") \
+ X(SCF_CTU, 36, 0xB3, "Parity error in CTU DATA RAM") \
+ X(SCF_CTU, 35, 0xB4, "Parity error for Cluster Rsp") \
+ X(SCF_CTU, 34, 0xB2, "Parity error for TRL requests from 9 agents") \
+ X(SCF_CTU, 33, 0xB1, "Parity error for MCF request") \
+ X(SCF_CTU, 32, 0xB0, "TRC DMA fillsnoop parity error")
+
+/* CMU_CLOCKS Uncorrectable RAS ERROR */
+#define CMU_CLOCKS_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(CMU_CLOCKS, 39, 0xC7, "Cluster 3 frequency monitor error") \
+ X(CMU_CLOCKS, 38, 0xC6, "Cluster 2 frequency monitor error") \
+ X(CMU_CLOCKS, 37, 0xC5, "Cluster 1 frequency monitor error") \
+ X(CMU_CLOCKS, 36, 0xC3, "Cluster 0 frequency monitor error") \
+ X(CMU_CLOCKS, 35, 0xC4, "Voltage error on ADC1 Monitored Logic") \
+ X(CMU_CLOCKS, 34, 0xC2, "Voltage error on ADC0 Monitored Logic") \
+ X(CMU_CLOCKS, 33, 0xC1, "Lookup Table 1 Parity Error") \
+ X(CMU_CLOCKS, 32, 0xC0, "Lookup Table 0 Parity Error")
+
+/*
+ * Define one ras_error entry.
+ * This macro will be used to generate ras_error records for each node
+ * defined by the <NODE_NAME>_UNCORR_RAS_ERROR_LIST macro.
+ */
+#define DEFINE_ONE_RAS_ERROR_MSG(unit, ras_bit, ierr, msg) \
+ { \
+ .error_msg = (msg), \
+ .error_code = (ierr) \
+ },
+
+/*
+ * Set one implementation defined bit in ERR<n>CTLR
+ * This macro will be used to collect all defined ERR_CTRL bits for each node
+ * defined by <NODE_NAME>_UNCORR_RAS_ERROR_LIST macro.
+ */
+#define DEFINE_ENABLE_RAS_BIT(unit, ras_bit, ierr, msg) \
+ do { \
+ val |= (1ULL << ras_bit##U); \
+ } while (0);
+
+/* Represent one RAS node with 0 or more error bits(ERR_CTLR) enabled */
+#define DEFINE_ONE_RAS_NODE(node) \
+static const struct ras_error_rec node##_uncorr_ras_errors[] = { \
+ node##_UNCORR_RAS_ERROR_LIST(DEFINE_ONE_RAS_ERROR_MSG) \
+ {NULL, 0U}, \
+}; \
+static inline uint64_t node##_err_ctrl(void) \
+{ \
+ uint64_t val = 0ULL; \
+ node##_UNCORR_RAS_ERROR_LIST(DEFINE_ENABLE_RAS_BIT) \
+ return val; \
+}
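+
+/*
+ * For illustration, DEFINE_ONE_RAS_NODE(JSR_RET) expands to roughly:
+ *
+ *   static const struct ras_error_rec JSR_RET_uncorr_ras_errors[] = {
+ *       { .error_msg = "Floating Point Register File Parity Error", .error_code = 0x13 },
+ *       ...
+ *       { NULL, 0U },
+ *   };
+ *   static inline uint64_t JSR_RET_err_ctrl(void)
+ *   {
+ *       uint64_t val = 0ULL;
+ *       val |= (1ULL << 35U); val |= (1ULL << 34U);
+ *       val |= (1ULL << 33U); val |= (1ULL << 32U);
+ *       return val;
+ *   }
+ */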
+
+#define DEFINE_ONE_RAS_AUX_DATA(node) \
+ { \
+ .error_records = node##_uncorr_ras_errors, \
+ .err_ctrl = &node##_err_ctrl \
+ },
+
+#define PER_CORE_RAS_NODE_LIST(X) \
+ X(IFU) \
+ X(JSR_RET) \
+ X(JSR_MTS) \
+ X(LSD_STQ) \
+ X(LSD_DCC) \
+ X(LSD_L1HPF)
+
+#define PER_CORE_RAS_GROUP_NODES PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA)
+
+#define PER_CLUSTER_RAS_NODE_LIST(X) \
+ X(L2) \
+ X(CLUSTER_CLOCKS) \
+ X(MMU)
+
+#define PER_CLUSTER_RAS_GROUP_NODES PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA)
+
+#define SCF_L3_BANK_RAS_NODE_LIST(X) X(L3)
+
+/* There are 4 SCF_L3 nodes: ERRSELR = 3*256 + L3_Bank_ID (0-3) */
+#define SCF_L3_BANK_RAS_GROUP_NODES \
+ SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) \
+ SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) \
+ SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) \
+ SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA)
+
+#define CCPLEX_RAS_NODE_LIST(X) \
+ X(CCPMU) \
+ X(SCF_IOB) \
+ X(SCF_SNOC) \
+ X(SCF_CTU) \
+ X(CMU_CLOCKS)
+
+#define CCPLEX_RAS_GROUP_NODES CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA)
+
+#endif /* TEGRA194_RAS_H */
diff --git a/tftf/tests/plat/nvidia/tegra194/serror_handler.S b/tftf/tests/plat/nvidia/tegra194/serror_handler.S
new file mode 100644
index 0000000..f040e5d
--- /dev/null
+++ b/tftf/tests/plat/nvidia/tegra194/serror_handler.S
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <sdei.h>
+
+ .globl serror_sdei_event_handler
+/*
+ * SDEI event handler for SErrors.
+ */
+func serror_sdei_event_handler
+ stp x29, x30, [sp, #-16]!
+ bl sdei_handler
+ ldp x29, x30, [sp], #16
+ mov_imm x0, SDEI_EVENT_COMPLETE
+ mov x1, xzr
+ smc #0
+ b .
+endfunc serror_sdei_event_handler
diff --git a/tftf/tests/plat/nvidia/tegra194/test_ras_corrected.c b/tftf/tests/plat/nvidia/tegra194/test_ras_corrected.c
new file mode 100644
index 0000000..a7be1a7
--- /dev/null
+++ b/tftf/tests/plat/nvidia/tegra194/test_ras_corrected.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <drivers/arm/arm_gic.h>
+#include <events.h>
+#include <lib/irq.h>
+#include <power_management.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#include <platform.h>
+
+#include "include/tegra194_ras.h"
+
+/* Macro to indicate CPU to start an action */
+#define START U(0xAA55)
+
+/* Global flag to indicate that a fault was received */
+static volatile uint64_t irq_received;
+
+/* NVIDIA Pseudo fault generation registers */
+#define T194_ERXPFGCTL_EL1 S3_0_C15_C1_4
+#define T194_ERXPFGCDN_EL1 S3_0_C15_C1_6
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxpfgctl_el1, T194_ERXPFGCTL_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxpfgcdn_el1, T194_ERXPFGCDN_EL1)
+
+/* Instantiate RAS nodes */
+PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE);
+PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE);
+SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE);
+CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE);
+
+/* Instantiate RAS node groups */
+static __unused struct ras_aux_data per_core_ras_group[] = {
+ PER_CORE_RAS_GROUP_NODES
+};
+
+static __unused struct ras_aux_data per_cluster_ras_group[] = {
+ PER_CLUSTER_RAS_GROUP_NODES
+};
+
+static __unused struct ras_aux_data scf_l3_ras_group[] = {
+ SCF_L3_BANK_RAS_GROUP_NODES
+};
+
+static __unused struct ras_aux_data ccplex_ras_group[] = {
+ CCPLEX_RAS_GROUP_NODES
+};
+
+/*
+ * We have the same probe and handler for each error record group; use a macro
+ * to simplify the record definition.
+ */
+#define ADD_ONE_ERR_GROUP(errselr_start, group) \
+ { \
+ .sysreg.idx_start = (errselr_start), \
+ .sysreg.num_idx = ARRAY_SIZE((group)), \
+ .aux_data = (group) \
+ }
+
+/* RAS error record group information */
+static struct err_record_info tegra194_ras_records[] = {
+ /*
+ * Per core ras error records
+ *
+ * ERRSELR starts from (0*256 + Logical_CPU_ID*16 + 0) to
+ * (0*256 + Logical_CPU_ID*16 + 5) for each group.
+ * 8 cores/groups, 6 * 8 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x000, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x010, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x020, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x030, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x040, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x050, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x060, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x070, per_core_ras_group),
+
+ /*
+ * Per cluster ras error records
+ *
+ * ERRSELR starts from 2*256 + Logical_Cluster_ID*16 + 0 to
+	 * 2*256 + Logical_Cluster_ID*16 + 2.
+ * 4 clusters/groups, 3 * 4 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x200, per_cluster_ras_group),
+ ADD_ONE_ERR_GROUP(0x210, per_cluster_ras_group),
+ ADD_ONE_ERR_GROUP(0x220, per_cluster_ras_group),
+ ADD_ONE_ERR_GROUP(0x230, per_cluster_ras_group),
+
+ /*
+ * SCF L3_Bank ras error records
+ *
+ * ERRSELR: 3*256 + L3_Bank_ID, L3_Bank_ID: 0-3
+ * 1 groups, 4 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x300, scf_l3_ras_group),
+
+ /*
+ * CCPLEX ras error records
+ *
+ * ERRSELR: 4*256 + Unit_ID, Unit_ID: 0 - 4
+ * 1 groups, 5 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x400, ccplex_ras_group),
+};
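+
+/*
+ * Worked example of the numbering above: the LSD_STQ node (4th per-core
+ * node, index 3) on logical CPU 3 is selected with
+ * ERRSELR_EL1 = 0*256 + 3*16 + 3 = 0x33, i.e. group 0x030, offset 3.
+ */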
+
+static void test_ras_inject_error(uint32_t errselr_el1, unsigned int errctlr_bit)
+{
+ uint64_t pfg_ctlr = BIT_64(errctlr_bit);
+
+ INFO("Injecting on 0x%lx:\n\terrctlr_el1=%d\n\terrselr_el1=0x%x\n\tpfg_ctlr=0x%llx\n",
+ read_mpidr_el1(), errctlr_bit, errselr_el1, pfg_ctlr);
+
+ /* clear the flag before we inject error */
+ irq_received = 0;
+ dccvac((uint64_t)&irq_received);
+ dmbish();
+
+ /* Choose error record */
+ write_errselr_el1(errselr_el1);
+
+ /* Program count down timer to 1 */
+ write_erxpfgcdn_el1(1);
+
+ /* Start count down to generate error on expiry */
+ write_erxpfgctl_el1(3 << 6 | ERXPFGCTL_CDEN_BIT | pfg_ctlr);
+ waitms(5);
+
+ /* Wait until IRQ fires */
+ do {
+ /*
+ * clean+invalidate cache lines before reading the global
+ * flag populated by another CPU
+ */
+ dccivac((uint64_t)&irq_received);
+ dmbish();
+ } while (irq_received == 0);
+
+ /* write 1-to-clear */
+ write_erxstatus_el1(read_erxstatus_el1() | (3 << 24));
+}
+
+static void generate_corrected_faults(void)
+{
+ unsigned int i;
+ unsigned int j;
+ unsigned int k;
+ unsigned int total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tegra194_ras_records); i++)
+ total += tegra194_ras_records[i].sysreg.num_idx;
+
+ VERBOSE("Total Nodes:%u\n", total);
+
+ for (i = 0; i < ARRAY_SIZE(tegra194_ras_records); i++) {
+
+ const struct err_record_info *info = &tegra194_ras_records[i];
+ uint32_t idx_start = info->sysreg.idx_start;
+ uint32_t num_idx = info->sysreg.num_idx;
+ const struct ras_aux_data *aux_data =
+ (const struct ras_aux_data *)info->aux_data;
+
+ /* No corrected errors for this node */
+ if (idx_start == 0x400) {
+ VERBOSE("0x%lx skipping errselr_el1=0x%x\n",
+ read_mpidr_el1(), idx_start);
+ continue;
+ }
+
+ for (j = 0; j < num_idx; j++) {
+ uint32_t errselr_el1 = idx_start + j;
+ uint64_t __unused err_fr;
+ uint64_t uncorr_errs, corr_errs;
+
+ /* Write to ERRSELR_EL1 to select the error record */
+ write_errselr_el1(errselr_el1);
+
+ /*
+ * all supported errors for this node exist in the
+ * top 32 bits
+ */
+ err_fr = read_erxfr_el1();
+ err_fr >>= 32;
+ err_fr <<= 32;
+
+ /*
+ * Mask the corrected errors that are disabled
+ * in the ERXFR register
+ */
+ uncorr_errs = aux_data[j].err_ctrl();
+ corr_errs = ~uncorr_errs & err_fr;
+
+ for (k = 32; k < 64; k++) {
+ /*
+				 * On the JSR_MTS node, errctlr_bit 32 and 34
+				 * are uncorrected errors and should be
+				 * skipped
+ */
+ if ((idx_start < 0x200) && ((errselr_el1 & 0xF) == 2) && (k == 32 || k == 34)) {
+ VERBOSE("0x%lx skipping errselr_el1=0x%x\n",
+ read_mpidr_el1(), errselr_el1);
+ continue;
+ }
+
+ if (corr_errs & BIT_64(k))
+ test_ras_inject_error(errselr_el1, k);
+ }
+ }
+ }
+}
+
+static int ce_irq_handler(void *data)
+{
+ unsigned int __unused irq_num = *(unsigned int *)data;
+
+ /* write 1-to-clear */
+ write_erxstatus_el1(read_erxstatus_el1() | (3 << 24));
+
+ irq_received = 1;
+
+ /*
+ * clean cache lines after writing the global flag so that
+ * latest value is visible to other CPUs
+ */
+ dccvac((uint64_t)&irq_received);
+ dsbish();
+
+ /* Return value doesn't matter */
+ return 0;
+}
+
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+static volatile uint64_t cpu_powerdown[PLATFORM_CORE_COUNT];
+static volatile uint64_t cpu_start_test[PLATFORM_CORE_COUNT];
+static volatile uint64_t cpu_test_completed[PLATFORM_CORE_COUNT];
+
+static test_result_t test_corrected_errors(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ VERBOSE("Hello from core 0x%x\n", mpid);
+
+ /* Tell the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ /* Wait until lead CPU asks us to start the test */
+ do {
+ /*
+ * clean+invalidate cache lines before reading the global
+ * flag populated by another CPU
+ */
+ dccivac((uintptr_t)&cpu_start_test[core_pos]);
+ dmbish();
+ } while (!cpu_start_test[core_pos]);
+
+ generate_corrected_faults();
+
+ VERBOSE("0x%lx: test complete\n", read_mpidr_el1());
+
+ /* Inform lead CPU of test completion */
+ cpu_test_completed[core_pos] = true;
+ dccvac((uintptr_t)&cpu_test_completed[core_pos]);
+ dsbish();
+
+ /* Wait until lead CPU asks us to power down */
+ do {
+ /*
+ * clean+invalidate cache lines before reading the global
+ * flag populated by another CPU
+ */
+ dccivac((uintptr_t)&cpu_powerdown[core_pos]);
+ dmbish();
+ } while (!cpu_powerdown[core_pos]);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t test_ras_corrected(void)
+{
+ int64_t __unused ret = 0;
+ unsigned int cpu_node, cpu_mpid;
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos;
+
+ tftf_testcase_printf("Tegra194 corrected RAS error verification\n");
+
+ /* long execution test; reset watchdog */
+ tftf_platform_watchdog_reset();
+
+ /* register IRQ handler */
+ for (uint32_t irq = 424; irq <= 431; irq++) {
+
+ ret = tftf_irq_register_handler(irq, ce_irq_handler);
+ if (ret < 0)
+ return TEST_RESULT_FAIL;
+
+ /* enable the IRQ */
+ tftf_irq_enable(irq, GIC_HIGHEST_NS_PRIORITY);
+ }
+
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU, it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) test_corrected_errors,
+ 0);
+ if (ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * The lead CPU needs to wait for all other CPUs to enter the test.
+ * This is because the test framework declares the end of a test when no
+ * CPU is in the test. Therefore, if the lead CPU goes ahead and exits
+ * the test then potentially there could be no CPU executing the test at
+ * this time because none of them have entered the test yet, hence the
+	 * framework will be misled into thinking the test is finished.
+ */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+
+ /* Ask all CPUs to start the test */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ /* Allow the CPU to start the test */
+ core_pos = platform_get_core_pos(cpu_mpid);
+ cpu_start_test[core_pos] = START;
+
+ /*
+ * clean cache lines after writing the global flag so that
+ * latest value is visible to other CPUs
+ */
+ dccvac((uintptr_t)&cpu_start_test[core_pos]);
+ dsbish();
+
+ /* Wait for the CPU to complete the test */
+ do {
+ /*
+ * clean+invalidate cache lines before reading the global
+ * flag populated by another CPU
+ */
+ dccivac((uintptr_t)&cpu_test_completed[core_pos]);
+ dmbish();
+ } while (!cpu_test_completed[core_pos]);
+ }
+
+ /* run through all supported corrected faults */
+ generate_corrected_faults();
+
+ /* Wait for all CPUs to power off */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /*
+		 * Except for the lead CPU, wait for all cores to be powered
+		 * off by the framework
+ */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ /* Allow other CPUs to power down */
+ core_pos = platform_get_core_pos(cpu_mpid);
+ cpu_powerdown[core_pos] = START;
+
+ /*
+ * clean cache lines after writing the global flag so that
+ * latest value is visible to other CPUs
+ */
+ dccvac((uintptr_t)&cpu_powerdown[core_pos]);
+ dsbish();
+
+ /* Wait for the CPU to actually power off */
+ while (tftf_psci_affinity_info(cpu_mpid, MPIDR_AFFLVL0) != PSCI_STATE_OFF)
+ dsbsy();
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/plat/nvidia/tegra194/test_ras_uncorrectable.c b/tftf/tests/plat/nvidia/tegra194/test_ras_uncorrectable.c
new file mode 100644
index 0000000..b52a35c
--- /dev/null
+++ b/tftf/tests/plat/nvidia/tegra194/test_ras_uncorrectable.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <events.h>
+#include <lib/irq.h>
+#include <power_management.h>
+#include <sdei.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#include <platform.h>
+
+#include "include/tegra194_ras.h"
+
+/* Macro to indicate CPU to start an action */
+#define START U(0xAA55)
+
+/* Global flag to indicate that a fault was received */
+static volatile uint64_t fault_received;
+
+/* SDEI handler to receive RAS UC errors */
+extern int serror_sdei_event_handler(int ev, uint64_t arg);
+
+/* NVIDIA Pseudo fault generation registers */
+#define T194_ERXPFGCTL_EL1 S3_0_C15_C1_4
+#define T194_ERXPFGCDN_EL1 S3_0_C15_C1_6
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxpfgctl_el1, T194_ERXPFGCTL_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxpfgcdn_el1, T194_ERXPFGCDN_EL1)
+
+/* Instantiate RAS nodes */
+PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE);
+PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE);
+SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE);
+CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE);
+
+/* Instantiate RAS node groups */
+static __unused struct ras_aux_data per_core_ras_group[] = {
+ PER_CORE_RAS_GROUP_NODES
+};
+
+static __unused struct ras_aux_data per_cluster_ras_group[] = {
+ PER_CLUSTER_RAS_GROUP_NODES
+};
+
+static __unused struct ras_aux_data scf_l3_ras_group[] = {
+ SCF_L3_BANK_RAS_GROUP_NODES
+};
+
+static __unused struct ras_aux_data ccplex_ras_group[] = {
+ CCPLEX_RAS_GROUP_NODES
+};
+
+/*
+ * We have the same probe and handler for each error record group; use a macro
+ * to simplify the record definition.
+ */
+#define ADD_ONE_ERR_GROUP(errselr_start, group) \
+ { \
+ .sysreg.idx_start = (errselr_start), \
+ .sysreg.num_idx = ARRAY_SIZE((group)), \
+ .aux_data = (group) \
+ }
+
+/* RAS error record group information */
+static struct err_record_info tegra194_ras_records[] = {
+ /*
+ * Per core RAS error records
+ *
+ * ERRSELR starts from (0*256 + Logical_CPU_ID*16 + 0) to
+ * (0*256 + Logical_CPU_ID*16 + 5) for each group.
+ * 8 cores/groups, 6 * 8 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x000, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x010, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x020, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x030, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x040, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x050, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x060, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x070, per_core_ras_group),
+
+ /*
+ * Per cluster ras error records
+ *
+ * ERRSELR starts from 2*256 + Logical_Cluster_ID*16 + 0 to
+	 * 2*256 + Logical_Cluster_ID*16 + 2.
+ * 4 clusters/groups, 3 * 4 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x200, per_cluster_ras_group),
+ ADD_ONE_ERR_GROUP(0x210, per_cluster_ras_group),
+ ADD_ONE_ERR_GROUP(0x220, per_cluster_ras_group),
+ ADD_ONE_ERR_GROUP(0x230, per_cluster_ras_group),
+
+ /*
+ * SCF L3_Bank ras error records
+ *
+ * ERRSELR: 3*256 + L3_Bank_ID, L3_Bank_ID: 0-3
+ * 1 groups, 4 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x300, scf_l3_ras_group),
+
+ /*
+ * CCPLEX ras error records
+ *
+ * ERRSELR: 4*256 + Unit_ID, Unit_ID: 0 - 4
+ * 1 groups, 5 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x400, ccplex_ras_group),
+};
+
+static void test_ras_inject_serror(uint32_t errselr_el1, uint64_t pfg_ctlr)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+ /*
+ * The per-cluster frequency monitoring nodes should be accessed from
+ * CPUs in the cluster that the node belongs to. e.g. nodes 0x200 and
+ * 0x201 should be accessed from CPUs in cluster 0, nodes 0x210 and
+ * 0x211 should be accessed from CPUs in cluster 1 and so on.
+ */
+ if (((errselr_el1 & 0xF00) == 0x200) && ((errselr_el1 >> 4) & 0xF) != (core_pos >> 1)) {
+ return;
+ }
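+	/*
+	 * Example: a CPU with core_pos 5 belongs to cluster core_pos >> 1 = 2,
+	 * so among the per-cluster nodes it only injects on the 0x22x records
+	 * and returns early for the 0x20x, 0x21x and 0x23x ones.
+	 */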
+
+ /* clear the flag before we inject SError */
+ fault_received = 0;
+ dccvac((uint64_t)&fault_received);
+ dmbish();
+
+ INFO("mpidr=0x%lx, errselr_el1=0x%x, pfg_ctlr=0x%llx\n",
+ read_mpidr_el1(), errselr_el1, pfg_ctlr);
+
+ /* Choose error record */
+ write_errselr_el1(errselr_el1);
+
+ /* Program count down timer to 1 */
+ write_erxpfgcdn_el1(1);
+
+ /* Start count down to generate error on expiry */
+ write_erxpfgctl_el1(ERXPFGCTL_UC_BIT | ERXPFGCTL_CDEN_BIT | pfg_ctlr);
+
+ /* wait until the SError fires */
+ do {
+ dccivac((uint64_t)&fault_received);
+ dmbish();
+ } while (fault_received == 0);
+
+ /*
+ * ACLR_EL1, Bit13 = RESET_RAS_FMON
+ *
+ * A write of 1 to this write-only bit re-enables checking for RAS
+ * frequency monitoring errors which are temporarily disabled when
+ * detected.
+ */
+ if (((errselr_el1 & 0xF00) == 0x200) && ((errselr_el1 >> 4) & 0xF) == (core_pos >> 1))
+ write_actlr_el1(read_actlr_el1() | BIT_32(13));
+ else if ((errselr_el1 == 0x404))
+ write_actlr_el1(read_actlr_el1() | BIT_32(13));
+}
+
+static void generate_uncorrectable_faults(void)
+{
+ unsigned int i;
+ unsigned int j;
+ unsigned int k;
+ unsigned int total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tegra194_ras_records); i++)
+ total += tegra194_ras_records[i].sysreg.num_idx;
+
+ VERBOSE("Total Nodes:%u\n", total);
+
+ for (i = 0; i < ARRAY_SIZE(tegra194_ras_records); i++) {
+
+ const struct err_record_info *info = &tegra194_ras_records[i];
+ uint32_t idx_start = info->sysreg.idx_start;
+ uint32_t num_idx = info->sysreg.num_idx;
+ const struct ras_aux_data *aux_data =
+ (const struct ras_aux_data *)info->aux_data;
+
+ for (j = 0; j < num_idx; j++) {
+ uint32_t errselr_el1 = idx_start + j;
+ uint64_t __unused err_fr;
+ uint64_t uncorr_errs;
+
+ /* Write to ERRSELR_EL1 to select the error record */
+ write_errselr_el1(errselr_el1);
+
+ /*
+ * all supported errors for this node exist in the
+ * top 32 bits
+ */
+ err_fr = read_erxfr_el1();
+ err_fr >>= 32;
+ err_fr <<= 32;
+
+ /*
+ * Mask the uncorrectable errors that are disabled
+ * in the ERXFR register
+ */
+ uncorr_errs = aux_data[j].err_ctrl();
+ uncorr_errs &= err_fr;
+
+ for (k = 32; k < 64; k++) {
+ if (uncorr_errs & BIT_64(k)) {
+ VERBOSE("ERR<x>CTLR bit%d\n", k);
+ test_ras_inject_serror(errselr_el1, BIT_64(k));
+ }
+ }
+ }
+ }
+}
+
+int __unused sdei_handler(int ev, uint64_t arg)
+{
+ fault_received = 1;
+ dccvac((uint64_t)&fault_received);
+ dsbish();
+ VERBOSE("SError SDEI event received.\n");
+ return 0;
+}
+
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+static volatile uint64_t cpu_powerdown[PLATFORM_CORE_COUNT];
+static volatile uint64_t cpu_start_test[PLATFORM_CORE_COUNT];
+static volatile uint64_t cpu_test_completed[PLATFORM_CORE_COUNT];
+
+static void sdei_register_for_event(int event_id)
+{
+ int64_t ret = 0;
+
+ /* Register SDEI handler */
+ ret = sdei_event_register(event_id, serror_sdei_event_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret < 0)
+ tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+ ret);
+
+ ret = sdei_event_enable(event_id);
+ if (ret < 0)
+ tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+
+ ret = sdei_pe_unmask();
+ if (ret < 0)
+ tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+}
+
+static test_result_t test_cpu_serrors(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ VERBOSE("Hello from core 0x%x\n", mpid);
+
+ /* register for the SDEI event ID */
+ sdei_register_for_event(300 + core_pos);
+
+ /* Tell the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ /* Wait until lead CPU asks us to start the test */
+ do {
+ dccivac((uintptr_t)&cpu_start_test[core_pos]);
+ dmbish();
+ } while (!cpu_start_test[core_pos]);
+
+ generate_uncorrectable_faults();
+
+ VERBOSE("0x%lx: test complete\n", read_mpidr_el1());
+
+ /* Inform lead CPU of test completion */
+ cpu_test_completed[core_pos] = true;
+ dccvac((uintptr_t)&cpu_test_completed[core_pos]);
+ dsbish();
+
+ /* Wait until lead CPU asks us to power down */
+ do {
+ dccivac((uintptr_t)&cpu_powerdown[core_pos]);
+ dmbish();
+ } while (!cpu_powerdown[core_pos]);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t test_ras_uncorrectable(void)
+{
+ const int __unused event_id = 300;
+ int64_t __unused ret = 0;
+ unsigned int cpu_node, cpu_mpid;
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos;
+
+ tftf_testcase_printf("Tegra194 uncorrectable RAS errors.\n");
+
+ /* long execution test; reset watchdog */
+ tftf_platform_watchdog_reset();
+
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU, it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) test_cpu_serrors,
+ 0);
+ if (ret != PSCI_E_SUCCESS)
+ ret = TEST_RESULT_FAIL;
+ }
+
+ /*
+ * The lead CPU needs to wait for all other CPUs to enter the test.
+ * This is because the test framework declares the end of a test when no
+ * CPU is in the test. Therefore, if the lead CPU goes ahead and exits
+ * the test then potentially there could be no CPU executing the test at
+ * this time because none of them have entered the test yet, hence the
+	 * framework will be misled into thinking the test is finished.
+ */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+
+ /* register for the SDEI event ID */
+ sdei_register_for_event(300);
+
+ /* Ask all CPUs to start the test */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ /* Allow the CPU to start the test */
+ core_pos = platform_get_core_pos(cpu_mpid);
+ cpu_start_test[core_pos] = START;
+ dccvac((uintptr_t)&cpu_start_test[core_pos]);
+ dsbish();
+
+ /* Wait for the CPU to complete the test */
+ do {
+ dccivac((uintptr_t)&cpu_test_completed[core_pos]);
+ dmbish();
+ } while (!cpu_test_completed[core_pos]);
+ }
+
+ /* run through all supported uncorrectable faults */
+ generate_uncorrectable_faults();
+
+ VERBOSE("0x%lx: test complete\n", read_mpidr_el1());
+
+ /* Wait for all CPUs to power off */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /*
+		 * Except for the lead CPU, wait for all cores to be powered
+		 * off by the framework
+ */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ /* Allow other CPUs to start power down sequence */
+ core_pos = platform_get_core_pos(cpu_mpid);
+ cpu_powerdown[core_pos] = START;
+ dccvac((uintptr_t)&cpu_powerdown[core_pos]);
+ dsbish();
+
+ /* Wait for the CPU to actually power off */
+ while (tftf_psci_affinity_info(cpu_mpid, MPIDR_AFFLVL0) != PSCI_STATE_OFF)
+ dsbsy();
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_soc_id.c b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_soc_id.c
index b866450..3fa48df 100644
--- a/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_soc_id.c
+++ b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_soc_id.c
@@ -34,8 +34,6 @@
smc_args args;
smc_ret_values ret;
int32_t expected_ver;
- int32_t skip_cnt = 0;
- bool fail_soc_id_test = false;
/* Check if SMCCC version is at least v1.2 */
expected_ver = MAKE_SMCCC_VERSION(1, 2);
@@ -55,7 +53,7 @@
ret = tftf_smc(&args);
if ((int)ret.ret0 == SMC_ARCH_CALL_NOT_SUPPORTED) {
tftf_testcase_printf("SMCCC_ARCH_SOC_ID is not implemented\n");
- return TEST_RESULT_FAIL;
+ return TEST_RESULT_SKIPPED;
}
/* If the call returns SMC_OK then SMCCC_ARCH_SOC_ID is feature available */
@@ -63,31 +61,29 @@
ret = get_soc_id_param(SMC_GET_SOC_REVISION);
if ((int)ret.ret0 == SMC_ARCH_CALL_INVAL_PARAM) {
- ERROR("Invalid param passed to SMCCC_ARCH_SOC_ID\n");
- fail_soc_id_test = true;
+ tftf_testcase_printf("Invalid param passed to \
+ SMCCC_ARCH_SOC_ID\n");
+ return TEST_RESULT_FAIL;
} else if ((int)ret.ret0 == SMC_ARCH_CALL_NOT_SUPPORTED) {
tftf_testcase_printf("SOC Rev is not implemented\n");
- skip_cnt++;
- } else {
- tftf_testcase_printf("SOC Rev = 0x%x\n", (int)ret.ret0);
+ return TEST_RESULT_FAIL;
}
+ tftf_testcase_printf("SOC Rev = 0x%x\n", (int)ret.ret0);
+
ret = get_soc_id_param(SMC_GET_SOC_VERSION);
if ((int)ret.ret0 == SMC_ARCH_CALL_INVAL_PARAM) {
- ERROR("Invalid param passed to SMCCC_ARCH_SOC_ID\n");
- fail_soc_id_test = true;
+ tftf_testcase_printf("Invalid param passed to \
+ SMCCC_ARCH_SOC_ID\n");
+ return TEST_RESULT_FAIL;
} else if ((int)ret.ret0 == SMC_ARCH_CALL_NOT_SUPPORTED) {
tftf_testcase_printf("SOC Ver is not implemented\n");
- skip_cnt++;
- } else {
- tftf_testcase_printf("SOC Ver = 0x%x\n", (int)ret.ret0);
+ return TEST_RESULT_FAIL;
}
- if (skip_cnt == 2)
- return TEST_RESULT_SKIPPED;
- else if (fail_soc_id_test)
- return TEST_RESULT_FAIL;
+ tftf_testcase_printf("SOC Ver = 0x%x\n", (int)ret.ret0);
+
} else {
ERROR("Invalid error during SMCCC_ARCH_FEATURES call = 0x%x\n",
(int)ret.ret0);
diff --git a/tftf/tests/runtime_services/secure_service/ffa_helpers.c b/tftf/tests/runtime_services/secure_service/ffa_helpers.c
index 9955c7c..c90cac8 100644
--- a/tftf/tests/runtime_services/secure_service/ffa_helpers.c
+++ b/tftf/tests/runtime_services/secure_service/ffa_helpers.c
@@ -9,6 +9,11 @@
#include <ffa_helpers.h>
#include <ffa_svc.h>
+#define OPTEE_FFA_GET_API_VERSION (0)
+#define OPTEE_FFA_GET_OS_VERSION (1)
+#define OPTEE_FFA_GET_OS_VERSION_MAJOR (3)
+#define OPTEE_FFA_GET_OS_VERSION_MINOR (8)
+
/*-----------------------------------------------------------------------------
* FFA_RUN
*
@@ -108,6 +113,49 @@
}
/*
+ * check_spmc_execution_level
+ *
+ * Attempt to send implementation-defined protocol messages to OP-TEE through
+ * direct messaging. The criterion for detecting OP-TEE is that the responses
+ * match the expected version values. When the SPMC runs at S-EL2 (with Cactus
+ * instances running at S-EL1), the responses will not match the pre-defined
+ * version IDs.
+ *
+ * Returns true if the SPMC is probed as being OP-TEE at S-EL1.
+ */
+bool check_spmc_execution_level(void)
+{
+ unsigned int is_optee_spmc_criteria = 0U;
+ smc_ret_values ret_values;
+
+	/*
+	 * Send a first OP-TEE-defined protocol message through
+	 * an FF-A direct message and check the API version
+	 * returned in the response.
+	 */
+ ret_values = ffa_msg_send_direct_req(HYP_ID, SP_ID(1),
+ OPTEE_FFA_GET_API_VERSION);
+ if ((ret_values.ret3 == FFA_VERSION_MAJOR) &&
+ (ret_values.ret4 == FFA_VERSION_MINOR)) {
+ is_optee_spmc_criteria++;
+ }
+
+	/*
+	 * Send a second OP-TEE-defined protocol message through
+	 * an FF-A direct message and check the OS version
+	 * returned in the response.
+	 */
+ ret_values = ffa_msg_send_direct_req(HYP_ID, SP_ID(1),
+ OPTEE_FFA_GET_OS_VERSION);
+ if ((ret_values.ret3 == OPTEE_FFA_GET_OS_VERSION_MAJOR) &&
+ (ret_values.ret4 == OPTEE_FFA_GET_OS_VERSION_MINOR)) {
+ is_optee_spmc_criteria++;
+ }
+
+ return (is_optee_spmc_criteria == 2U);
+}
+
+/*
* FFA Version ABI helper.
* Version fields:
* -Bits[30:16]: Major version.
@@ -164,3 +212,38 @@
return tftf_smc(&args);
}
+
+/* Query the higher EL if the requested FF-A feature is implemented. */
+smc_ret_values ffa_features(uint32_t feature)
+{
+ smc_args args = {
+ .fid = FFA_FEATURES,
+ .arg1 = feature
+ };
+
+ return tftf_smc(&args);
+}
+
+/* Get information about VMs or SPs based on UUID */
+smc_ret_values ffa_partition_info_get(const uint32_t uuid[4])
+{
+ smc_args args = {
+ .fid = FFA_PARTITION_INFO_GET,
+ .arg1 = uuid[0],
+ .arg2 = uuid[1],
+ .arg3 = uuid[2],
+ .arg4 = uuid[3]
+ };
+
+ return tftf_smc(&args);
+}
+
+/* Notify the SPMD that the partition's RX buffer can be released */
+smc_ret_values ffa_rx_release(void)
+{
+ smc_args args = {
+ .fid = FFA_RX_RELEASE
+ };
+
+ return tftf_smc(&args);
+}
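
The helpers above are thin SMC wrappers; ffa_partition_info_get in particular returns its descriptors through the caller's RX buffer, which is why ffa_rx_release is exposed alongside it. As a hedged illustration only (the test function name is made up, and the assumption that the descriptor count comes back in w2/ret2 follows the FF-A v1.0 convention), a caller might combine them like this:

    /* Hypothetical usage sketch, not part of this patch. */
    static test_result_t example_list_partitions(void)
    {
    	/* The nil UUID asks for information on all partitions. */
    	const uint32_t nil_uuid[4] = {0U, 0U, 0U, 0U};
    	smc_ret_values ret;

    	ret = ffa_partition_info_get(nil_uuid);
    	if (ret.ret0 != FFA_SUCCESS_SMC32) {
    		return TEST_RESULT_FAIL;
    	}

    	/* Assumes the FF-A v1.0 convention that w2 carries the number of
    	 * descriptors written to the caller's RX buffer. */
    	tftf_testcase_printf("%lu partition(s) reported\n",
    			     (unsigned long)ret.ret2);

    	/* Hand the RX buffer back once the descriptors are consumed. */
    	ret = ffa_rx_release();

    	return (ret.ret0 == FFA_SUCCESS_SMC32) ?
    		TEST_RESULT_SUCCESS : TEST_RESULT_FAIL;
    }
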
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c b/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
index c83a403..6008b78 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
@@ -16,11 +16,6 @@
#define DIRECT_MSG_TEST_PATTERN2 (0xbbbb0000)
#define DIRECT_MSG_TEST_PATTERN3 (0xcccc0000)
-#define OPTEE_FFA_GET_API_VERSION (0)
-#define OPTEE_FFA_GET_OS_VERSION (1)
-#define OPTEE_FFA_GET_OS_VERSION_MAJOR (3)
-#define OPTEE_FFA_GET_OS_VERSION_MINOR (8)
-
static test_result_t send_receive_direct_msg(unsigned int sp_id,
unsigned int test_pattern)
{
@@ -51,49 +46,6 @@
return TEST_RESULT_SUCCESS;
}
-/*
- * check_spmc_execution_level
- *
- * Attempt sending impdef protocol messages to OP-TEE through direct messaging.
- * Criteria for detecting OP-TEE presence is that responses match defined
- * version values. In the case of SPMC running at S-EL2 (and Cactus instances
- * running at S-EL1) the response will not match the pre-defined version IDs.
- *
- * Returns true if SPMC is probed as being OP-TEE at S-EL1.
- *
- */
-static bool check_spmc_execution_level(void)
-{
- unsigned int is_optee_spmc_criteria = 0;
- smc_ret_values ret_values;
-
- /*
- * Send a first OP-TEE-defined protocol message through
- * FFA direct message.
- *
- */
- ret_values = ffa_msg_send_direct_req(HYP_ID, SP_ID(1),
- OPTEE_FFA_GET_API_VERSION);
- if ((ret_values.ret3 == FFA_VERSION_MAJOR) &&
- (ret_values.ret4 == FFA_VERSION_MINOR)) {
- is_optee_spmc_criteria++;
- }
-
- /*
- * Send a second OP-TEE-defined protocol message through
- * FFA direct message.
- *
- */
- ret_values = ffa_msg_send_direct_req(HYP_ID, SP_ID(1),
- OPTEE_FFA_GET_OS_VERSION);
- if ((ret_values.ret3 == OPTEE_FFA_GET_OS_VERSION_MAJOR) &&
- (ret_values.ret4 == OPTEE_FFA_GET_OS_VERSION_MINOR)) {
- is_optee_spmc_criteria++;
- }
-
- return (is_optee_spmc_criteria == 2);
-}
-
test_result_t test_ffa_direct_messaging(void)
{
test_result_t result;
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_features.c b/tftf/tests/runtime_services/secure_service/test_ffa_features.c
new file mode 100644
index 0000000..7d67bf8
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_features.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <ffa_helpers.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+struct feature_test {
+ const char *test_name;
+ unsigned int feature;
+ u_register_t expected_ret;
+};
+
+static const struct feature_test test_target[] = {
+ {"FFA_ERROR_32 check", FFA_ERROR, FFA_SUCCESS_SMC32},
+ {"FFA_SUCCESS_32 check", FFA_SUCCESS_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_INTERRUPT_32 check", FFA_INTERRUPT, FFA_SUCCESS_SMC32},
+ {"FFA_VERSION_32 check", FFA_VERSION, FFA_SUCCESS_SMC32},
+ {"FFA_FEATURES_32 check", FFA_FEATURES, FFA_SUCCESS_SMC32},
+ {"FFA_RX_RELEASE_32 check", FFA_RX_RELEASE, FFA_SUCCESS_SMC32},
+ {"FFA_RXTX_MAP_32 check", FFA_RXTX_MAP_SMC32, FFA_ERROR},
+ {"FFA_RXTX_MAP_64 check", FFA_RXTX_MAP_SMC64, FFA_SUCCESS_SMC32},
+ {"FFA_RXTX_UNMAP_32 check", FFA_RXTX_UNMAP, FFA_ERROR},
+ {"FFA_PARTITION_INFO_GET_32 check", FFA_PARTITION_INFO_GET, FFA_SUCCESS_SMC32},
+ {"FFA_ID_GET_32 check", FFA_ID_GET, FFA_SUCCESS_SMC32},
+ {"FFA_MSG_POLL_32 check", FFA_MSG_POLL, FFA_SUCCESS_SMC32},
+ {"FFA_MSG_WAIT_32 check", FFA_MSG_WAIT, FFA_SUCCESS_SMC32},
+ {"FFA_YIELD_32 check", FFA_MSG_YIELD, FFA_SUCCESS_SMC32},
+ {"FFA_RUN_32 check", FFA_MSG_RUN, FFA_SUCCESS_SMC32},
+ {"FFA_MSG_SEND_32 check", FFA_MSG_SEND, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_DONATE_32 check", FFA_MEM_DONATE_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_LEND_32 check", FFA_MEM_LEND_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_SHARE_32 check", FFA_MEM_SHARE_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RETRIEVE_REQ_32 check", FFA_MEM_RETRIEVE_REQ_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RETRIEVE_RESP_32 check", FFA_MEM_RETRIEVE_RESP, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RELINQUISH_32 check", FFA_MEM_RELINQUISH, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RECLAIM_32 check", FFA_MEM_RECLAIM, FFA_SUCCESS_SMC32},
+ {"Check non-existent command", 0xFFFF, FFA_ERROR}
+};
+
+test_result_t test_ffa_features(void)
+{
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
+
+ /* Check if SPMC is OP-TEE at S-EL1 */
+ if (check_spmc_execution_level()) {
+ /* FFA_FEATURES is not yet supported in OP-TEE */
+ return TEST_RESULT_SUCCESS;
+ }
+
+ smc_ret_values ffa_ret;
+ unsigned int i, test_target_size =
+ sizeof(test_target) / sizeof(struct feature_test);
+
+ for (i = 0U; i < test_target_size; i++) {
+ ffa_ret = ffa_features(test_target[i].feature);
+ if (ffa_ret.ret0 != test_target[i].expected_ret) {
+ tftf_testcase_printf("%s returned %lx, expected %lx\n",
+ test_target[i].test_name,
+ ffa_ret.ret0,
+ test_target[i].expected_ret);
+ return TEST_RESULT_FAIL;
+ }
+ if ((test_target[i].expected_ret == (u_register_t)FFA_ERROR) &&
+ (ffa_ret.ret2 != (u_register_t)FFA_ERROR_NOT_SUPPORTED)) {
+ tftf_testcase_printf("%s failed for the wrong reason: "
+ "returned %lx, expected %lx\n",
+ test_target[i].test_name,
+ ffa_ret.ret2,
+ (u_register_t)FFA_ERROR_NOT_SUPPORTED);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/tests-spm.mk b/tftf/tests/tests-spm.mk
index 0c54e1c..bef373f 100644
--- a/tftf/tests/tests-spm.mk
+++ b/tftf/tests/tests-spm.mk
@@ -9,4 +9,5 @@
ffa_helpers.c \
test_ffa_direct_messaging.c \
test_ffa_version.c \
+ test_ffa_features.c \
)
diff --git a/tftf/tests/tests-spm.xml b/tftf/tests/tests-spm.xml
index 5ed2524..e2f29bf 100644
--- a/tftf/tests/tests-spm.xml
+++ b/tftf/tests/tests-spm.xml
@@ -30,4 +30,10 @@
</testsuite>
+ <testsuite name="PSA FF-A features"
+ description="Test FFA_FEATURES ABI" >
+ <testcase name="Test FFA_FEATURES"
+ function="test_ffa_features" />
+ </testsuite>
+
</testsuites>
diff --git a/tftf/tests/tests-tegra194.mk b/tftf/tests/tests-tegra194.mk
new file mode 100644
index 0000000..890c840
--- /dev/null
+++ b/tftf/tests/tests-tegra194.mk
@@ -0,0 +1,11 @@
+#
+# Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/plat/nvidia/tegra194/, \
+ test_ras_corrected.c \
+ test_ras_uncorrectable.c \
+ serror_handler.S \
+)
diff --git a/tftf/tests/tests-tegra194.xml b/tftf/tests/tests-tegra194.xml
new file mode 100644
index 0000000..49f6f9e
--- /dev/null
+++ b/tftf/tests/tests-tegra194.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Tegra194 platform tests" description="Tests for Tegra194 platforms">
+ <testcase name="RAS corrected error test" function="test_ras_corrected" />
+ <testcase name="RAS uncorrectable error test" function="test_ras_uncorrectable" />
+ </testsuite>
+
+</testsuites>
diff --git a/tools/generate_json/generate_json.sh b/tools/generate_json/generate_json.sh
index 8dd2630..7405378 100755
--- a/tools/generate_json/generate_json.sh
+++ b/tools/generate_json/generate_json.sh
@@ -22,9 +22,11 @@
if [ "$1" == "cactus" ]; then
echo -e "{\n\t\"$1-primary\" : {\n \
\t\"image\": \"$1.bin\",\n \
- \t\"pm\": \"../../../spm/$1/$1.dts\"\n\t},\n\n\t\"$1-secondary\" : {\n \
+ \t\"pm\": \"../../../spm/$1/$1.dts\",\n \
+ \t\"owner\": \"SiP\"\n\t},\n\n\t\"$1-secondary\" : {\n \
\t\"image\": \"$1.bin\",\n \
- \t\"pm\": \"../../../spm/$1/$1-secondary.dts\" \n \
+ \t\"pm\": \"../../../spm/$1/$1-secondary.dts\",\n \
+ \t\"owner\": \"Plat\"\n \
}\n}" \
> "$GENERATED_JSON"
else