diff options
author | Varun Wadekar <vwadekar@nvidia.com> | 2020-04-24 18:15:39 -0700 |
---|---|---|
committer | Manish Pandey <manish.pandey2@arm.com> | 2020-08-17 21:39:03 +0000 |
commit | 83aa8e4b6d3abbaf43ab453c95befc3a8c281be8 (patch) | |
tree | 510478efec0a8257c56d20dab899683aedf87dd0 /tftf | |
parent | a5799c9f986eb0deb4838e95bc535f7e1b04b117 (diff) | |
download | tf-a-tests-83aa8e4b6d3abbaf43ab453c95befc3a8c281be8.tar.gz |
Tegra194: introduce RAS uncorrectable error injection tests
Platform vendors have some tests that are specific to their platforms.
This patch introduces a test suite for Tegra194 platforms. As the first
test, the platform will inject the RAS uncorrectable errors and verify
that the platform detects and reports them.
Signed-off-by: Varun Wadekar <vwadekar@nvidia.com>
Change-Id: Id7b26d35dd4c638c2bd92959f18f12423ec1fd8b
Diffstat (limited to 'tftf')
-rw-r--r-- | tftf/tests/plat/nvidia/tegra194/include/tegra194_ras.h | 290 | ||||
-rw-r--r-- | tftf/tests/plat/nvidia/tegra194/serror_handler.S | 23 | ||||
-rw-r--r-- | tftf/tests/plat/nvidia/tegra194/test_ras_uncorrectable.c | 388 | ||||
-rw-r--r-- | tftf/tests/tests-tegra194.mk | 10 | ||||
-rw-r--r-- | tftf/tests/tests-tegra194.xml | 14 |
5 files changed, 725 insertions, 0 deletions
diff --git a/tftf/tests/plat/nvidia/tegra194/include/tegra194_ras.h b/tftf/tests/plat/nvidia/tegra194/include/tegra194_ras.h new file mode 100644 index 000000000..9bbd26145 --- /dev/null +++ b/tftf/tests/plat/nvidia/tegra194/include/tegra194_ras.h @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2020, NVIDIA Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef TEGRA194_RAS_H +#define TEGRA194_RAS_H + +/* Error record information */ +struct err_record_info { + /* Opaque group-specific data */ + void *aux_data; + struct { + /* + * For error records accessed via. system register, index of + * the error record. + */ + unsigned int idx_start; + unsigned int num_idx; + } sysreg; +}; + +struct err_record_mapping { + struct err_record_info *err_records; + size_t num_err_records; +}; + +/* Implementation defined RAS error and corresponding error message */ +struct ras_error_rec { + const char *error_msg; + /* IERR(bits[15:8]) from ERR<n>STATUS */ + uint8_t error_code; +}; + +/* RAS error node-specific auxiliary data */ +struct ras_aux_data { + /* point to null-terminated ras_error array to convert error code to msg. */ + const struct ras_error_rec *error_records; + /* function to return a value which needs to be programmed into ERXCTLR_EL1 + * to enable all specified RAS errors for current node. 
+ */ + uint64_t (*err_ctrl)(void); +}; + +/* Architecturally-defined primary error code SERR, bits[7:0] from ERR<n>STATUS */ +#define ERR_STATUS_SERR(X) \ + /* SERR, message */ \ + X(0, "No error") \ + X(1, "IMPLEMENTATION DEFINED error") \ + X(2, "Data value from (non-associative) internal memory") \ + X(3, "IMPLEMENTATION DEFINED pin") \ + X(4, "Assertion failure") \ + X(5, "Error detected on internal data path") \ + X(6, "Data value from associative memory") \ + X(7, "Address/control value from associative memory") \ + X(8, "Data value from a TLB") \ + X(9, "Address/control value from a TLB") \ + X(10, "Data value from producer") \ + X(11, "Address/control value from producer") \ + X(12, "Data value from (non-associative) external memory") \ + X(13, "Illegal address (software fault)") \ + X(14, "Illegal access (software fault)") \ + X(15, "Illegal state (software fault)") \ + X(16, "Internal data register") \ + X(17, "Internal control register") \ + X(18, "Error response from slave") \ + X(19, "External timeout") \ + X(20, "Internal timeout") \ + X(21, "Deferred error from slave not supported at master") + +/* IFU Uncorrectable RAS ERROR */ +#define IFU_UNCORR_RAS_ERROR_LIST(X) + +/* JSR_RET Uncorrectable RAS ERROR */ +#define JSR_RET_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(JSR_RET, 35, 0x13, "Floating Point Register File Parity Error") \ + X(JSR_RET, 34, 0x12, "Integer Register File Parity Error") \ + X(JSR_RET, 33, 0x11, "Garbage Bundle") \ + X(JSR_RET, 32, 0x10, "Bundle Completion Timeout") + +/* JSR_MTS Uncorrectable RAS ERROR */ +#define JSR_MTS_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(JSR_MTS, 40, 0x28, "CoreSight Access Error") \ + X(JSR_MTS, 39, 0x27, "Dual Execution Uncorrectable Error") \ + X(JSR_MTS, 37, 0x25, "CTU MMIO Region") \ + X(JSR_MTS, 36, 0x24, "MTS MMCRAB Region Access") \ + X(JSR_MTS, 35, 0x23, "MTS_CARVEOUT Access from ARM SW") + +/* LSD_STQ Uncorrectable RAS ERROR */ +#define 
LSD_STQ_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(LSD_STQ, 41, 0x39, "Coherent Cache Data Store Multi-Line ECC Error") \ + X(LSD_STQ, 40, 0x38, "Coherent Cache Data Store Uncorrectable ECC Error") \ + X(LSD_STQ, 38, 0x36, "Coherent Cache Data Load Uncorrectable ECC Error") \ + X(LSD_STQ, 33, 0x31, "Coherent Cache Tag Store Parity Error") \ + X(LSD_STQ, 32, 0x30, "Coherent Cache Tag Load Parity Error") + +/* LSD_DCC Uncorrectable RAS ERROR */ +#define LSD_DCC_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(LSD_DCC, 41, 0x49, "BTU Copy Mini-Cache PPN Multi-Hit Error") \ + X(LSD_DCC, 39, 0x47, "Coherent Cache Data Uncorrectable ECC Error") \ + X(LSD_DCC, 37, 0x45, "Version Cache Byte-Enable Parity Error") \ + X(LSD_DCC, 36, 0x44, "Version Cache Data Uncorrectable ECC Error") \ + X(LSD_DCC, 33, 0x41, "BTU Copy Coherent Cache PPN Parity Error") \ + X(LSD_DCC, 32, 0x40, "BTU Copy Coherent Cache VPN Parity Error") + +/* LSD_L1HPF Uncorrectable RAS ERROR */ +#define LSD_L1HPF_UNCORR_RAS_ERROR_LIST(X) + +/* L2 Uncorrectable RAS ERROR */ +#define L2_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(L2, 56, 0x68, "URT Timeout") \ + X(L2, 55, 0x67, "L2 Protocol Violation") \ + X(L2, 54, 0x66, "SCF to L2 Slave Error Read") \ + X(L2, 53, 0x65, "SCF to L2 Slave Error Write") \ + X(L2, 52, 0x64, "SCF to L2 Decode Error Read") \ + X(L2, 51, 0x63, "SCF to L2 Decode Error Write") \ + X(L2, 50, 0x62, "SCF to L2 Request Response Interface Parity Errors") \ + X(L2, 49, 0x61, "SCF to L2 Advance notice interface parity errors") \ + X(L2, 48, 0x60, "SCF to L2 Filldata Parity Errors") \ + X(L2, 47, 0x5F, "SCF to L2 UnCorrectable ECC Data Error on interface") \ + X(L2, 45, 0x5D, "Core 1 to L2 Parity Error") \ + X(L2, 44, 0x5C, "Core 0 to L2 Parity Error") \ + X(L2, 43, 0x5B, "L2 Multi-Hit") \ + X(L2, 42, 0x5A, "L2 URT Tag Parity Error") \ + X(L2, 41, 0x59, "L2 NTT Tag Parity Error") \ + X(L2, 40, 0x58, "L2 MLT Tag 
Parity Error") \ + X(L2, 39, 0x57, "L2 URD Data") \ + X(L2, 38, 0x56, "L2 NTP Data") \ + X(L2, 36, 0x54, "L2 MLC Uncorrectable Clean") \ + X(L2, 35, 0x53, "L2 URD Uncorrectable Dirty") \ + X(L2, 34, 0x52, "L2 MLC Uncorrectable Dirty") + +/* CLUSTER_CLOCKS Uncorrectable RAS ERROR */ +#define CLUSTER_CLOCKS_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(CLUSTER_CLOCKS, 32, 0xE4, "Frequency Monitor Error") + +/* MMU Uncorrectable RAS ERROR */ +#define MMU_UNCORR_RAS_ERROR_LIST(X) + +/* L3 Uncorrectable RAS ERROR */ +#define L3_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(L3, 43, 0x7B, "SNOC Interface Parity Error") \ + X(L3, 42, 0x7A, "MCF Interface Parity Error") \ + X(L3, 41, 0x79, "L3 Tag Parity Error") \ + X(L3, 40, 0x78, "L3 Dir Parity Error") \ + X(L3, 39, 0x77, "L3 Uncorrectable ECC Error") \ + X(L3, 37, 0x75, "Multi-Hit CAM Error") \ + X(L3, 36, 0x74, "Multi-Hit Tag Error") \ + X(L3, 35, 0x73, "Unrecognized Command Error") \ + X(L3, 34, 0x72, "L3 Protocol Error") + +/* CCPMU Uncorrectable RAS ERROR */ +#define CCPMU_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(CCPMU, 33, 0x81, "CRAB Access Error") + +/* SCF_IOB Uncorrectable RAS ERROR */ +#define SCF_IOB_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(SCF_IOB, 41, 0x99, "Request parity error") \ + X(SCF_IOB, 40, 0x98, "Putdata parity error") \ + X(SCF_IOB, 39, 0x97, "Uncorrectable ECC on Putdata") \ + X(SCF_IOB, 38, 0x96, "CBB Interface Error") \ + X(SCF_IOB, 37, 0x95, "MMCRAB Error") \ + X(SCF_IOB, 36, 0x94, "IHI Interface Error") \ + X(SCF_IOB, 35, 0x93, "CRI Error") \ + X(SCF_IOB, 34, 0x92, "TBX Interface Error") \ + X(SCF_IOB, 33, 0x91, "EVP Interface Error") + +/* SCF_SNOC Uncorrectable RAS ERROR */ +#define SCF_SNOC_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(SCF_SNOC, 42, 0xAA, "Misc Client Parity Error") \ + X(SCF_SNOC, 41, 0xA9, "Misc Filldata Parity Error") \ + 
X(SCF_SNOC, 40, 0xA8, "Uncorrectable ECC Misc Client") \ + X(SCF_SNOC, 39, 0xA7, "DVMU Interface Parity Error") \ + X(SCF_SNOC, 38, 0xA6, "DVMU Interface Timeout Error") \ + X(SCF_SNOC, 37, 0xA5, "CPE Request Error") \ + X(SCF_SNOC, 36, 0xA4, "CPE Response Error") \ + X(SCF_SNOC, 35, 0xA3, "CPE Timeout Error") \ + X(SCF_SNOC, 34, 0xA2, "Uncorrectable Carveout Error") + +/* SCF_CTU Uncorrectable RAS ERROR */ +#define SCF_CTU_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(SCF_CTU, 39, 0xB7, "Timeout error for TRC_DMA request") \ + X(SCF_CTU, 38, 0xB6, "Timeout error for CTU Snp") \ + X(SCF_CTU, 37, 0xB5, "Parity error in CTU TAG RAM") \ + X(SCF_CTU, 36, 0xB3, "Parity error in CTU DATA RAM") \ + X(SCF_CTU, 35, 0xB4, "Parity error for Cluster Rsp") \ + X(SCF_CTU, 34, 0xB2, "Parity error for TRL requests from 9 agents") \ + X(SCF_CTU, 33, 0xB1, "Parity error for MCF request") \ + X(SCF_CTU, 32, 0xB0, "TRC DMA fillsnoop parity error") + +/* CMU_CLOCKS Uncorrectable RAS ERROR */ +#define CMU_CLOCKS_UNCORR_RAS_ERROR_LIST(X) \ + /* Name, ERR_CTRL, IERR, ISA Desc */ \ + X(CMU_CLOCKS, 39, 0xC7, "Cluster 3 frequency monitor error") \ + X(CMU_CLOCKS, 38, 0xC6, "Cluster 2 frequency monitor error") \ + X(CMU_CLOCKS, 37, 0xC5, "Cluster 1 frequency monitor error") \ + X(CMU_CLOCKS, 36, 0xC3, "Cluster 0 frequency monitor error") \ + X(CMU_CLOCKS, 35, 0xC4, "Voltage error on ADC1 Monitored Logic") \ + X(CMU_CLOCKS, 34, 0xC2, "Voltage error on ADC0 Monitored Logic") \ + X(CMU_CLOCKS, 33, 0xC1, "Lookup Table 1 Parity Error") \ + X(CMU_CLOCKS, 32, 0xC0, "Lookup Table 0 Parity Error") + +/* + * Define one ras_error entry. + * This macro wille be used to to generate ras_error records for each node + * defined by <NODE_NAME>_UNCORR_RAS_ERROR_LIST macro. 
+ */ +#define DEFINE_ONE_RAS_ERROR_MSG(unit, ras_bit, ierr, msg) \ + { \ + .error_msg = (msg), \ + .error_code = (ierr) \ + }, + +/* + * Set one implementation defined bit in ERR<n>CTLR + * This macro will be used to collect all defined ERR_CTRL bits for each node + * defined by <NODE_NAME>_UNCORR_RAS_ERROR_LIST macro. + */ +#define DEFINE_ENABLE_RAS_BIT(unit, ras_bit, ierr, msg) \ + do { \ + val |= (1ULL << ras_bit##U); \ + } while (0); + +/* Represent one RAS node with 0 or more error bits(ERR_CTLR) enabled */ +#define DEFINE_ONE_RAS_NODE(node) \ +static const struct ras_error_rec node##_uncorr_ras_errors[] = { \ + node##_UNCORR_RAS_ERROR_LIST(DEFINE_ONE_RAS_ERROR_MSG) \ + {NULL, 0U}, \ +}; \ +static inline uint64_t node##_err_ctrl(void) \ +{ \ + uint64_t val = 0ULL; \ + node##_UNCORR_RAS_ERROR_LIST(DEFINE_ENABLE_RAS_BIT) \ + return val; \ +} + +#define DEFINE_ONE_RAS_AUX_DATA(node) \ + { \ + .error_records = node##_uncorr_ras_errors, \ + .err_ctrl = &node##_err_ctrl \ + }, + +#define PER_CORE_RAS_NODE_LIST(X) \ + X(IFU) \ + X(JSR_RET) \ + X(JSR_MTS) \ + X(LSD_STQ) \ + X(LSD_DCC) \ + X(LSD_L1HPF) + +#define PER_CORE_RAS_GROUP_NODES PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) + +#define PER_CLUSTER_RAS_NODE_LIST(X) \ + X(L2) \ + X(CLUSTER_CLOCKS) \ + X(MMU) + +#define PER_CLUSTER_RAS_GROUP_NODES PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) + +#define SCF_L3_BANK_RAS_NODE_LIST(X) X(L3) + +/* we have 4 SCF_L3 nodes:3*256 + L3_Bank_ID(0-3) */ +#define SCF_L3_BANK_RAS_GROUP_NODES \ + SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) \ + SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) \ + SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) \ + SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) + +#define CCPLEX_RAS_NODE_LIST(X) \ + X(CCPMU) \ + X(SCF_IOB) \ + X(SCF_SNOC) \ + X(SCF_CTU) \ + X(CMU_CLOCKS) + +#define CCPLEX_RAS_GROUP_NODES CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) + +#endif /* TEGRA194_RAS_H */ diff --git 
a/tftf/tests/plat/nvidia/tegra194/serror_handler.S b/tftf/tests/plat/nvidia/tegra194/serror_handler.S new file mode 100644 index 000000000..f040e5d47 --- /dev/null +++ b/tftf/tests/plat/nvidia/tegra194/serror_handler.S @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2020, NVIDIA Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <sdei.h> + + .globl serror_sdei_event_handler +/* + * SDEI event handler for SErrors. + */ +func serror_sdei_event_handler + stp x29, x30, [sp, #-16]! + bl sdei_handler + ldp x29, x30, [sp], #16 + mov_imm x0, SDEI_EVENT_COMPLETE + mov x1, xzr + smc #0 + b . +endfunc serror_sdei_event_handler diff --git a/tftf/tests/plat/nvidia/tegra194/test_ras_uncorrectable.c b/tftf/tests/plat/nvidia/tegra194/test_ras_uncorrectable.c new file mode 100644 index 000000000..b52a35c9d --- /dev/null +++ b/tftf/tests/plat/nvidia/tegra194/test_ras_uncorrectable.c @@ -0,0 +1,388 @@ +/* + * Copyright (c) 2020, NVIDIA Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <debug.h> +#include <events.h> +#include <lib/irq.h> +#include <power_management.h> +#include <sdei.h> +#include <test_helpers.h> +#include <tftf_lib.h> + +#include <platform.h> + +#include "include/tegra194_ras.h" + +/* Macro to indicate CPU to start an action */ +#define START U(0xAA55) + +/* Global flag to indicate that a fault was received */ +static volatile uint64_t fault_received; + +/* SDEI handler to receive RAS UC errors */ +extern int serror_sdei_event_handler(int ev, uint64_t arg); + +/* NVIDIA Pseudo fault generation registers */ +#define T194_ERXPFGCTL_EL1 S3_0_C15_C1_4 +#define T194_ERXPFGCDN_EL1 S3_0_C15_C1_6 +DEFINE_RENAME_SYSREG_RW_FUNCS(erxpfgctl_el1, T194_ERXPFGCTL_EL1) +DEFINE_RENAME_SYSREG_RW_FUNCS(erxpfgcdn_el1, T194_ERXPFGCDN_EL1) + +/* Instantiate RAS nodes */ +PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE); +PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE); +SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE); +CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE); + +/* Instantiate RAS node groups */ +static __unused struct ras_aux_data per_core_ras_group[] = { + PER_CORE_RAS_GROUP_NODES +}; + +static __unused struct ras_aux_data per_cluster_ras_group[] = { + PER_CLUSTER_RAS_GROUP_NODES +}; + +static __unused struct ras_aux_data scf_l3_ras_group[] = { + SCF_L3_BANK_RAS_GROUP_NODES +}; + +static __unused struct ras_aux_data ccplex_ras_group[] = { + CCPLEX_RAS_GROUP_NODES +}; + +/* + * we have same probe and handler for each error record group, use a macro to + * simply the record definition. 
+ */ +#define ADD_ONE_ERR_GROUP(errselr_start, group) \ + { \ + .sysreg.idx_start = (errselr_start), \ + .sysreg.num_idx = ARRAY_SIZE((group)), \ + .aux_data = (group) \ + } + +/* RAS error record group information */ +static struct err_record_info tegra194_ras_records[] = { + /* + * Per core RAS error records + * + * ERRSELR starts from (0*256 + Logical_CPU_ID*16 + 0) to + * (0*256 + Logical_CPU_ID*16 + 5) for each group. + * 8 cores/groups, 6 * 8 nodes in total. + */ + ADD_ONE_ERR_GROUP(0x000, per_core_ras_group), + ADD_ONE_ERR_GROUP(0x010, per_core_ras_group), + ADD_ONE_ERR_GROUP(0x020, per_core_ras_group), + ADD_ONE_ERR_GROUP(0x030, per_core_ras_group), + ADD_ONE_ERR_GROUP(0x040, per_core_ras_group), + ADD_ONE_ERR_GROUP(0x050, per_core_ras_group), + ADD_ONE_ERR_GROUP(0x060, per_core_ras_group), + ADD_ONE_ERR_GROUP(0x070, per_core_ras_group), + + /* + * Per cluster ras error records + * + * ERRSELR starts from 2*256 + Logical_Cluster_ID*16 + 0 to + * 2*256 + Logical_Cluster_ID*16 + 3. + * 4 clusters/groups, 3 * 4 nodes in total. + */ + ADD_ONE_ERR_GROUP(0x200, per_cluster_ras_group), + ADD_ONE_ERR_GROUP(0x210, per_cluster_ras_group), + ADD_ONE_ERR_GROUP(0x220, per_cluster_ras_group), + ADD_ONE_ERR_GROUP(0x230, per_cluster_ras_group), + + /* + * SCF L3_Bank ras error records + * + * ERRSELR: 3*256 + L3_Bank_ID, L3_Bank_ID: 0-3 + * 1 groups, 4 nodes in total. + */ + ADD_ONE_ERR_GROUP(0x300, scf_l3_ras_group), + + /* + * CCPLEX ras error records + * + * ERRSELR: 4*256 + Unit_ID, Unit_ID: 0 - 4 + * 1 groups, 5 nodes in total. + */ + ADD_ONE_ERR_GROUP(0x400, ccplex_ras_group), +}; + +static void test_ras_inject_serror(uint32_t errselr_el1, uint64_t pfg_ctlr) +{ + unsigned int core_pos = platform_get_core_pos(read_mpidr_el1() & MPID_MASK); + + /* + * The per-cluster frequency monitoring nodes should be accessed from + * CPUs in the cluster that the node belongs to. e.g. 
nodes 0x200 and + * 0x201 should be accessed from CPUs in cluster 0, nodes 0x210 and + * 0x211 should be accessed from CPUs in cluster 1 and so on. + */ + if (((errselr_el1 & 0xF00) == 0x200) && ((errselr_el1 >> 4) & 0xF) != (core_pos >> 1)) { + return; + } + + /* clear the flag before we inject SError */ + fault_received = 0; + dccvac((uint64_t)&fault_received); + dmbish(); + + INFO("mpidr=0x%lx, errselr_el1=0x%x, pfg_ctlr=0x%llx\n", + read_mpidr_el1(), errselr_el1, pfg_ctlr); + + /* Choose error record */ + write_errselr_el1(errselr_el1); + + /* Program count down timer to 1 */ + write_erxpfgcdn_el1(1); + + /* Start count down to generate error on expiry */ + write_erxpfgctl_el1(ERXPFGCTL_UC_BIT | ERXPFGCTL_CDEN_BIT | pfg_ctlr); + + /* wait until the SError fires */ + do { + dccivac((uint64_t)&fault_received); + dmbish(); + } while (fault_received == 0); + + /* + * ACLR_EL1, Bit13 = RESET_RAS_FMON + * + * A write of 1 to this write-only bit re-enables checking for RAS + * frequency monitoring errors which are temporarily disabled when + * detected. 
+ */ + if (((errselr_el1 & 0xF00) == 0x200) && ((errselr_el1 >> 4) & 0xF) == (core_pos >> 1)) + write_actlr_el1(read_actlr_el1() | BIT_32(13)); + else if ((errselr_el1 == 0x404)) + write_actlr_el1(read_actlr_el1() | BIT_32(13)); +} + +static void generate_uncorrectable_faults(void) +{ + unsigned int i; + unsigned int j; + unsigned int k; + unsigned int total = 0; + + for (i = 0; i < ARRAY_SIZE(tegra194_ras_records); i++) + total += tegra194_ras_records[i].sysreg.num_idx; + + VERBOSE("Total Nodes:%u\n", total); + + for (i = 0; i < ARRAY_SIZE(tegra194_ras_records); i++) { + + const struct err_record_info *info = &tegra194_ras_records[i]; + uint32_t idx_start = info->sysreg.idx_start; + uint32_t num_idx = info->sysreg.num_idx; + const struct ras_aux_data *aux_data = + (const struct ras_aux_data *)info->aux_data; + + for (j = 0; j < num_idx; j++) { + uint32_t errselr_el1 = idx_start + j; + uint64_t __unused err_fr; + uint64_t uncorr_errs; + + /* Write to ERRSELR_EL1 to select the error record */ + write_errselr_el1(errselr_el1); + + /* + * all supported errors for this node exist in the + * top 32 bits + */ + err_fr = read_erxfr_el1(); + err_fr >>= 32; + err_fr <<= 32; + + /* + * Mask the uncorrectable errors that are disabled + * in the ERXFR register + */ + uncorr_errs = aux_data[j].err_ctrl(); + uncorr_errs &= err_fr; + + for (k = 32; k < 64; k++) { + if (uncorr_errs & BIT_64(k)) { + VERBOSE("ERR<x>CTLR bit%d\n", k); + test_ras_inject_serror(errselr_el1, BIT_64(k)); + } + } + } + } +} + +int __unused sdei_handler(int ev, uint64_t arg) +{ + fault_received = 1; + dccvac((uint64_t)&fault_received); + dsbish(); + VERBOSE("SError SDEI event received.\n"); + return 0; +} + +static event_t cpu_booted[PLATFORM_CORE_COUNT]; +static volatile uint64_t cpu_powerdown[PLATFORM_CORE_COUNT]; +static volatile uint64_t cpu_start_test[PLATFORM_CORE_COUNT]; +static volatile uint64_t cpu_test_completed[PLATFORM_CORE_COUNT]; + +static void sdei_register_for_event(int event_id) +{ + 
int64_t ret = 0; + + /* Register SDEI handler */ + ret = sdei_event_register(event_id, serror_sdei_event_handler, 0, + SDEI_REGF_RM_PE, read_mpidr_el1()); + if (ret < 0) + tftf_testcase_printf("SDEI event register failed: 0x%llx\n", + ret); + + ret = sdei_event_enable(event_id); + if (ret < 0) + tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret); + + ret = sdei_pe_unmask(); + if (ret < 0) + tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret); +} + +static test_result_t test_cpu_serrors(void) +{ + unsigned int mpid = read_mpidr_el1() & MPID_MASK; + unsigned int core_pos = platform_get_core_pos(mpid); + + VERBOSE("Hello from core 0x%x\n", mpid); + + /* register for the SDEI event ID */ + sdei_register_for_event(300 + core_pos); + + /* Tell the lead CPU that the calling CPU has entered the test */ + tftf_send_event(&cpu_booted[core_pos]); + + /* Wait until lead CPU asks us to start the test */ + do { + dccivac((uintptr_t)&cpu_start_test[core_pos]); + dmbish(); + } while (!cpu_start_test[core_pos]); + + generate_uncorrectable_faults(); + + VERBOSE("0x%lx: test complete\n", read_mpidr_el1()); + + /* Inform lead CPU of test completion */ + cpu_test_completed[core_pos] = true; + dccvac((uintptr_t)&cpu_test_completed[core_pos]); + dsbish(); + + /* Wait until lead CPU asks us to power down */ + do { + dccivac((uintptr_t)&cpu_powerdown[core_pos]); + dmbish(); + } while (!cpu_powerdown[core_pos]); + + return TEST_RESULT_SUCCESS; +} + +test_result_t test_ras_uncorrectable(void) +{ + const int __unused event_id = 300; + int64_t __unused ret = 0; + unsigned int cpu_node, cpu_mpid; + unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK; + unsigned int core_pos; + + tftf_testcase_printf("Tegra194 uncorrectable RAS errors.\n"); + + /* long execution test; reset watchdog */ + tftf_platform_watchdog_reset(); + + /* Power on all CPUs */ + for_each_cpu(cpu_node) { + + cpu_mpid = tftf_get_mpidr_from_node(cpu_node); + /* Skip lead CPU, it is already powered on */ + 
if (cpu_mpid == lead_mpid) + continue; + + ret = tftf_cpu_on(cpu_mpid, + (uintptr_t) test_cpu_serrors, + 0); + if (ret != PSCI_E_SUCCESS) + ret = TEST_RESULT_FAIL; + } + + /* + * The lead CPU needs to wait for all other CPUs to enter the test. + * This is because the test framework declares the end of a test when no + * CPU is in the test. Therefore, if the lead CPU goes ahead and exits + * the test then potentially there could be no CPU executing the test at + * this time because none of them have entered the test yet, hence the + * framework will be misled in thinking the test is finished. + */ + for_each_cpu(cpu_node) { + cpu_mpid = tftf_get_mpidr_from_node(cpu_node); + /* Skip lead CPU */ + if (cpu_mpid == lead_mpid) + continue; + + core_pos = platform_get_core_pos(cpu_mpid); + tftf_wait_for_event(&cpu_booted[core_pos]); + } + + /* register for the SDEI event ID */ + sdei_register_for_event(300); + + /* Ask all CPUs to start the test */ + for_each_cpu(cpu_node) { + cpu_mpid = tftf_get_mpidr_from_node(cpu_node); + /* + * Except lead CPU, Wait for all cores to be powered off + * by framework + */ + if (cpu_mpid == lead_mpid) + continue; + + /* Allow the CPU to start the test */ + core_pos = platform_get_core_pos(cpu_mpid); + cpu_start_test[core_pos] = START; + dccvac((uintptr_t)&cpu_start_test[core_pos]); + dsbish(); + + /* Wait for the CPU to complete the test */ + do { + dccivac((uintptr_t)&cpu_test_completed[core_pos]); + dmbish(); + } while (!cpu_test_completed[core_pos]); + } + + /* run through all supported uncorrectable faults */ + generate_uncorrectable_faults(); + + VERBOSE("0x%lx: test complete\n", read_mpidr_el1()); + + /* Wait for all CPUs to power off */ + for_each_cpu(cpu_node) { + cpu_mpid = tftf_get_mpidr_from_node(cpu_node); + /* + * Except lead CPU, Wait for all cores to be powered off + * by framework + */ + if (cpu_mpid == lead_mpid) + continue; + + /* Allow other CPUs to start power down sequence */ + core_pos = 
platform_get_core_pos(cpu_mpid); + cpu_powerdown[core_pos] = START; + dccvac((uintptr_t)&cpu_powerdown[core_pos]); + dsbish(); + + /* Wait for the CPU to actually power off */ + while (tftf_psci_affinity_info(cpu_mpid, MPIDR_AFFLVL0) != PSCI_STATE_OFF) + dsbsy(); + } + + return TEST_RESULT_SUCCESS; +} diff --git a/tftf/tests/tests-tegra194.mk b/tftf/tests/tests-tegra194.mk new file mode 100644 index 000000000..7b26945a0 --- /dev/null +++ b/tftf/tests/tests-tegra194.mk @@ -0,0 +1,10 @@ +# +# Copyright (c) 2020, NVIDIA Corporation. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +TESTS_SOURCES += $(addprefix tftf/tests/plat/nvidia/tegra194/, \ + test_ras_uncorrectable.c \ + serror_handler.S \ +) diff --git a/tftf/tests/tests-tegra194.xml b/tftf/tests/tests-tegra194.xml new file mode 100644 index 000000000..c1371f73b --- /dev/null +++ b/tftf/tests/tests-tegra194.xml @@ -0,0 +1,14 @@ +<?xml version="1.0" encoding="utf-8"?> + +<!-- + Copyright (c) 2020, NVIDIA Corporation. All rights reserved. + + SPDX-License-Identifier: BSD-3-Clause +--> + +<testsuites> + <testsuite name="Tegra194 platform tests" description="Tests for Tegra194 platforms"> + <testcase name="RAS uncorrectable error test" function="test_ras_uncorrectable" /> + </testsuite> + +</testsuites> |