Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2024, Arm Limited. All rights reserved. |
| 3 | * |
| 4 | * SPDX-License-Identifier: BSD-3-Clause |
| 5 | */ |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 6 | #include <arch.h> |
| 7 | #include <arch_helpers.h> |
| 8 | #include <arm_arch_svc.h> |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 9 | #include <events.h> |
| 10 | #include <plat_topology.h> |
| 11 | #include <platform.h> |
| 12 | #include <platform_def.h> |
| 13 | #include <power_management.h> |
| 14 | #include <psci.h> |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 15 | #include <smccc.h> |
| 16 | #include <sync.h> |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 17 | #include <test_helpers.h> |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 18 | #include <tftf_lib.h> |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 19 | |
/* Per-CPU rendezvous: each CPU signals its event once it has entered the test. */
static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];

/*
 * Per-CPU flag set by the custom synchronous exception handler when the
 * expected exception (EC == EC_UNKNOWN) is caught on that CPU.
 */
static volatile bool exception_triggered[PLATFORM_CORE_COUNT];

/*
 * The whole test should only be skipped if the test was skipped on all CPUs.
 * The test on each CPU can't return TEST_RESULT_SKIPPED, because the whole test
 * is skipped if any of the CPUs return TEST_RESULT_SKIPPED. Instead, to skip a
 * test, the test returns TEST_RESULT_SUCCESS, then sets a flag in the
 * test_skipped array. This array is checked at the end by the
 * run_asymmetric_test function.
 */
static volatile bool test_skipped[PLATFORM_CORE_COUNT];

/*
 * Test function run on each CPU. File-scope (rather than a local) so that
 * secondary CPUs entering via non_lead_cpu_fn can read it after test_init()
 * publishes it on the lead CPU.
 */
static test_result_t (*asymmetric_test_function)(void);
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 40 | |
Charlie Bareham | 70de3ff | 2024-08-20 11:27:25 +0100 | [diff] [blame] | 41 | static bool exception_handler(void) |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 42 | { |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 43 | unsigned int mpid = read_mpidr_el1() & MPID_MASK; |
| 44 | unsigned int core_pos = platform_get_core_pos(mpid); |
| 45 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 46 | uint64_t esr_el2 = read_esr_el2(); |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 47 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 48 | if (EC_BITS(esr_el2) == EC_UNKNOWN) { |
Charlie Bareham | 70de3ff | 2024-08-20 11:27:25 +0100 | [diff] [blame] | 49 | /* |
| 50 | * This may be an undef injection, or a trap to EL2 due to a |
| 51 | * register not being present. Both cases have the same EC |
| 52 | * value. |
| 53 | */ |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 54 | exception_triggered[core_pos] = true; |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 55 | return true; |
| 56 | } |
| 57 | |
| 58 | return false; |
| 59 | } |
| 60 | |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 61 | static test_result_t test_trbe(void) |
| 62 | { |
| 63 | unsigned int mpid = read_mpidr_el1() & MPID_MASK; |
| 64 | unsigned int core_pos = platform_get_core_pos(mpid); |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 65 | bool should_trigger_exception = is_trbe_errata_affected_core(); |
| 66 | |
| 67 | if (!is_feat_trbe_present()) { |
| 68 | test_skipped[core_pos] = true; |
| 69 | return TEST_RESULT_SUCCESS; |
| 70 | } |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 71 | |
Charlie Bareham | 70de3ff | 2024-08-20 11:27:25 +0100 | [diff] [blame] | 72 | register_custom_sync_exception_handler(exception_handler); |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 73 | exception_triggered[core_pos] = false; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 74 | read_trblimitr_el1(); |
Charlie Bareham | 4397e44 | 2024-08-20 10:17:38 +0100 | [diff] [blame] | 75 | unregister_custom_sync_exception_handler(); |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 76 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 77 | /** |
| 78 | * NOTE: TRBE as an asymmetric feature is as exceptional one. |
| 79 | * Even if the hardware supports the feature, TF-A deliberately disables |
| 80 | * it at EL3. In this scenario, when the register "TRBLIMITR_EL1" is |
| 81 | * accessed, the registered undef injection handler should kick in and |
| 82 | * the exception will be handled synchronously at EL2. |
| 83 | */ |
| 84 | if (exception_triggered[core_pos] != should_trigger_exception) { |
Charlie Bareham | 70de3ff | 2024-08-20 11:27:25 +0100 | [diff] [blame] | 85 | tftf_testcase_printf("Exception triggered for core = %d " |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 86 | "when accessing TRB_LIMTR\n", core_pos); |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 87 | return TEST_RESULT_FAIL; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 88 | } |
| 89 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 90 | return TEST_RESULT_SUCCESS; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 91 | } |
| 92 | |
| 93 | static test_result_t test_spe(void) |
| 94 | { |
| 95 | unsigned int mpid = read_mpidr_el1() & MPID_MASK; |
| 96 | unsigned int core_pos = platform_get_core_pos(mpid); |
| 97 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 98 | /** |
| 99 | * NOTE: SPE as an asymmetric feature, we expect to access the |
| 100 | * PMSCR_EL1 register, when supported in the hardware. |
| 101 | * If the feature isn't supported, we skip the test. |
| 102 | * So on each individual CPU, we verify whether the feature's presence |
| 103 | * and based on it we access (if feature supported) or skip the test. |
| 104 | */ |
| 105 | if (!is_feat_spe_supported()) { |
| 106 | test_skipped[core_pos] = true; |
| 107 | return TEST_RESULT_SUCCESS; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 108 | } |
| 109 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 110 | read_pmscr_el1(); |
| 111 | |
| 112 | return TEST_RESULT_SUCCESS; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 113 | } |
| 114 | |
Jayanth Dodderi Chidanand | f2f1e27 | 2024-09-03 11:49:51 +0100 | [diff] [blame] | 115 | static test_result_t test_tcr2(void) |
| 116 | { |
| 117 | unsigned int mpid = read_mpidr_el1() & MPID_MASK; |
| 118 | unsigned int core_pos = platform_get_core_pos(mpid); |
| 119 | |
| 120 | if (!is_feat_tcr2_supported()) { |
| 121 | test_skipped[core_pos] = true; |
| 122 | return TEST_RESULT_SUCCESS; |
| 123 | } |
| 124 | |
| 125 | read_tcr2_el1(); |
| 126 | |
| 127 | return TEST_RESULT_SUCCESS; |
| 128 | } |
| 129 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 130 | /* |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 131 | * Runs on one CPU, and runs asymmetric_test_function. |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 132 | */ |
| 133 | static test_result_t non_lead_cpu_fn(void) |
| 134 | { |
| 135 | unsigned int mpid = read_mpidr_el1() & MPID_MASK; |
| 136 | unsigned int core_pos = platform_get_core_pos(mpid); |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 137 | test_result_t test_result; |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 138 | |
| 139 | /* Signal to the lead CPU that the calling CPU has entered the test */ |
| 140 | tftf_send_event(&cpu_has_entered_test[core_pos]); |
| 141 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 142 | test_result = asymmetric_test_function(); |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 143 | |
| 144 | /* Ensure that EL3 still functional */ |
| 145 | smc_args args; |
| 146 | smc_ret_values smc_ret; |
| 147 | memset(&args, 0, sizeof(args)); |
| 148 | args.fid = SMCCC_VERSION; |
| 149 | smc_ret = tftf_smc(&args); |
| 150 | |
| 151 | tftf_testcase_printf("SMCCC Version = %d.%d\n", |
| 152 | (int)((smc_ret.ret0 >> SMCCC_VERSION_MAJOR_SHIFT) & SMCCC_VERSION_MAJOR_MASK), |
| 153 | (int)((smc_ret.ret0 >> SMCCC_VERSION_MINOR_SHIFT) & SMCCC_VERSION_MINOR_MASK)); |
| 154 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 155 | return test_result; |
| 156 | } |
| 157 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 158 | /* Set some variables that are accessible to all CPUs. */ |
| 159 | void test_init(test_result_t (*test_function)(void)) |
| 160 | { |
| 161 | int i; |
| 162 | |
| 163 | for (i = 0; i < PLATFORM_CORE_COUNT; i++) { |
| 164 | test_skipped[i] = false; |
| 165 | tftf_init_event(&cpu_has_entered_test[i]); |
| 166 | } |
| 167 | |
| 168 | asymmetric_test_function = test_function; |
| 169 | |
| 170 | /* Ensure the above writes are seen before any read */ |
| 171 | dmbsy(); |
| 172 | } |
| 173 | |
| 174 | /* |
| 175 | * Run the given test function on all CPUs. If the test is skipped on all CPUs, |
| 176 | * the whole test is skipped. This is checked using the test_skipped array. |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 177 | */ |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 178 | test_result_t run_asymmetric_test(test_result_t (*test_function)(void)) |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 179 | { |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 180 | unsigned int lead_mpid; |
| 181 | unsigned int cpu_mpid, cpu_node; |
| 182 | unsigned int core_pos; |
| 183 | int psci_ret; |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 184 | bool all_cpus_skipped; |
| 185 | int i; |
| 186 | uint32_t aff_info; |
| 187 | test_result_t test_result; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 188 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 189 | lead_mpid = read_mpidr_el1() & MPID_MASK; |
| 190 | |
| 191 | SKIP_TEST_IF_LESS_THAN_N_CPUS(2); |
| 192 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 193 | test_init(test_function); |
| 194 | |
| 195 | /* run test on lead CPU */ |
| 196 | test_result = test_function(); |
| 197 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 198 | /* Power on all CPUs */ |
| 199 | for_each_cpu(cpu_node) { |
| 200 | cpu_mpid = tftf_get_mpidr_from_node(cpu_node); |
| 201 | /* Skip lead CPU as it is already powered on */ |
| 202 | if (cpu_mpid == lead_mpid) |
| 203 | continue; |
| 204 | |
| 205 | psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t) non_lead_cpu_fn, 0); |
| 206 | if (psci_ret != PSCI_E_SUCCESS) { |
| 207 | tftf_testcase_printf( |
| 208 | "Failed to power on CPU 0x%x (%d)\n", |
| 209 | cpu_mpid, psci_ret); |
| 210 | return TEST_RESULT_SKIPPED; |
| 211 | } |
| 212 | } |
| 213 | |
| 214 | /* Wait for non-lead CPUs to enter the test */ |
| 215 | for_each_cpu(cpu_node) { |
| 216 | cpu_mpid = tftf_get_mpidr_from_node(cpu_node); |
| 217 | /* Skip lead CPU */ |
| 218 | if (cpu_mpid == lead_mpid) |
| 219 | continue; |
| 220 | |
| 221 | core_pos = platform_get_core_pos(cpu_mpid); |
| 222 | tftf_wait_for_event(&cpu_has_entered_test[core_pos]); |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 223 | } |
| 224 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 225 | /* Wait for all non-lead CPUs to power down */ |
| 226 | for_each_cpu(cpu_node) { |
| 227 | cpu_mpid = tftf_get_mpidr_from_node(cpu_node); |
| 228 | /* Skip lead CPU */ |
| 229 | if (cpu_mpid == lead_mpid) |
| 230 | continue; |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 231 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame] | 232 | do { |
| 233 | aff_info = tftf_psci_affinity_info(cpu_mpid, |
| 234 | MPIDR_AFFLVL0); |
| 235 | } while (aff_info != PSCI_STATE_OFF); |
| 236 | } |
| 237 | |
| 238 | /* |
| 239 | * If the test was skipped on all CPUs, the whole test should be |
| 240 | * skipped. |
| 241 | */ |
| 242 | |
| 243 | all_cpus_skipped = true; |
| 244 | for (i = 0; i < PLATFORM_CORE_COUNT; i++) { |
| 245 | if (!test_skipped[i]) { |
| 246 | all_cpus_skipped = false; |
| 247 | break; |
| 248 | } |
| 249 | } |
| 250 | |
| 251 | if (all_cpus_skipped) { |
| 252 | return TEST_RESULT_SKIPPED; |
| 253 | } else { |
| 254 | return test_result; |
| 255 | } |
| 256 | } |
| 257 | |
| 258 | /* Test Asymmetric Support for FEAT_TRBE */ |
| 259 | test_result_t test_trbe_errata_asymmetric(void) |
| 260 | { |
| 261 | return run_asymmetric_test(test_trbe); |
| 262 | } |
| 263 | |
| 264 | /* Test Asymmetric Support for FEAT_SPE */ |
| 265 | test_result_t test_spe_asymmetric(void) |
| 266 | { |
| 267 | return run_asymmetric_test(test_spe); |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 268 | } |
Jayanth Dodderi Chidanand | f2f1e27 | 2024-09-03 11:49:51 +0100 | [diff] [blame] | 269 | |
/* Test Asymmetric Support for FEAT_TCR2 */
test_result_t test_tcr2_asymmetric(void)
{
	return run_asymmetric_test(test_tcr2);
}