Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2024, Arm Limited. All rights reserved. |
| 3 | * |
| 4 | * SPDX-License-Identifier: BSD-3-Clause |
| 5 | */ |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 6 | #include <arch.h> |
| 7 | #include <arch_helpers.h> |
| 8 | #include <arm_arch_svc.h> |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 9 | #include <events.h> |
| 10 | #include <plat_topology.h> |
| 11 | #include <platform.h> |
| 12 | #include <platform_def.h> |
| 13 | #include <power_management.h> |
| 14 | #include <psci.h> |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 15 | #include <smccc.h> |
| 16 | #include <sync.h> |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 17 | #include <test_helpers.h> |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 18 | #include <tftf_lib.h> |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 19 | |
/* Per-CPU event: each non-lead CPU signals it has entered the test. */
static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];

/* Used when catching synchronous exceptions. Set per-CPU by exception_handler. */
static volatile bool exception_triggered[PLATFORM_CORE_COUNT];

/*
 * The whole test should only be skipped if the test was skipped on all CPUs.
 * The test on each CPU can't return TEST_RESULT_SKIPPED, because the whole test
 * is skipped if any of the CPUs return TEST_RESULT_SKIPPED. Instead, to skip a
 * test, the test returns TEST_RESULT_SUCCESS, then sets a flag in the
 * test_skipped array. This array is checked at the end by the
 * run_asymmetric_test function.
 */
static volatile bool test_skipped[PLATFORM_CORE_COUNT];

/*
 * Test function which is run on each CPU. It is global so it is visible to all
 * CPUs. Published by test_init() before the secondary CPUs are powered on.
 */
static test_result_t (*asymmetric_test_function)(void);
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 40 | |
Charlie Bareham | 70de3ff | 2024-08-20 11:27:25 +0100 | [diff] [blame] | 41 | static bool exception_handler(void) |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 42 | { |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 43 | unsigned int mpid = read_mpidr_el1() & MPID_MASK; |
| 44 | unsigned int core_pos = platform_get_core_pos(mpid); |
| 45 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 46 | uint64_t esr_el2 = read_esr_el2(); |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 47 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 48 | if (EC_BITS(esr_el2) == EC_UNKNOWN) { |
Charlie Bareham | 70de3ff | 2024-08-20 11:27:25 +0100 | [diff] [blame] | 49 | /* |
| 50 | * This may be an undef injection, or a trap to EL2 due to a |
| 51 | * register not being present. Both cases have the same EC |
| 52 | * value. |
| 53 | */ |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 54 | exception_triggered[core_pos] = true; |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 55 | return true; |
| 56 | } |
| 57 | |
| 58 | return false; |
| 59 | } |
| 60 | |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 61 | static test_result_t test_trbe(void) |
| 62 | { |
| 63 | unsigned int mpid = read_mpidr_el1() & MPID_MASK; |
| 64 | unsigned int core_pos = platform_get_core_pos(mpid); |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 65 | bool should_trigger_exception = is_trbe_errata_affected_core(); |
| 66 | |
| 67 | if (!is_feat_trbe_present()) { |
| 68 | test_skipped[core_pos] = true; |
| 69 | return TEST_RESULT_SUCCESS; |
| 70 | } |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 71 | |
Charlie Bareham | 70de3ff | 2024-08-20 11:27:25 +0100 | [diff] [blame] | 72 | register_custom_sync_exception_handler(exception_handler); |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 73 | exception_triggered[core_pos] = false; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 74 | read_trblimitr_el1(); |
Charlie Bareham | 4397e44 | 2024-08-20 10:17:38 +0100 | [diff] [blame] | 75 | unregister_custom_sync_exception_handler(); |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 76 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 77 | /** |
| 78 | * NOTE: TRBE as an asymmetric feature is as exceptional one. |
| 79 | * Even if the hardware supports the feature, TF-A deliberately disables |
| 80 | * it at EL3. In this scenario, when the register "TRBLIMITR_EL1" is |
| 81 | * accessed, the registered undef injection handler should kick in and |
| 82 | * the exception will be handled synchronously at EL2. |
| 83 | */ |
| 84 | if (exception_triggered[core_pos] != should_trigger_exception) { |
Charlie Bareham | 70de3ff | 2024-08-20 11:27:25 +0100 | [diff] [blame] | 85 | tftf_testcase_printf("Exception triggered for core = %d " |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 86 | "when accessing TRB_LIMTR\n", core_pos); |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 87 | return TEST_RESULT_FAIL; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 88 | } |
| 89 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 90 | return TEST_RESULT_SUCCESS; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 91 | } |
| 92 | |
| 93 | static test_result_t test_spe(void) |
| 94 | { |
| 95 | unsigned int mpid = read_mpidr_el1() & MPID_MASK; |
| 96 | unsigned int core_pos = platform_get_core_pos(mpid); |
| 97 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 98 | /** |
| 99 | * NOTE: SPE as an asymmetric feature, we expect to access the |
| 100 | * PMSCR_EL1 register, when supported in the hardware. |
| 101 | * If the feature isn't supported, we skip the test. |
| 102 | * So on each individual CPU, we verify whether the feature's presence |
| 103 | * and based on it we access (if feature supported) or skip the test. |
| 104 | */ |
| 105 | if (!is_feat_spe_supported()) { |
| 106 | test_skipped[core_pos] = true; |
| 107 | return TEST_RESULT_SUCCESS; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 108 | } |
| 109 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 110 | read_pmscr_el1(); |
| 111 | |
| 112 | return TEST_RESULT_SUCCESS; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 113 | } |
| 114 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 115 | /* |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 116 | * Runs on one CPU, and runs asymmetric_test_function. |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 117 | */ |
| 118 | static test_result_t non_lead_cpu_fn(void) |
| 119 | { |
| 120 | unsigned int mpid = read_mpidr_el1() & MPID_MASK; |
| 121 | unsigned int core_pos = platform_get_core_pos(mpid); |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 122 | test_result_t test_result; |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 123 | |
| 124 | /* Signal to the lead CPU that the calling CPU has entered the test */ |
| 125 | tftf_send_event(&cpu_has_entered_test[core_pos]); |
| 126 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 127 | test_result = asymmetric_test_function(); |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 128 | |
| 129 | /* Ensure that EL3 still functional */ |
| 130 | smc_args args; |
| 131 | smc_ret_values smc_ret; |
| 132 | memset(&args, 0, sizeof(args)); |
| 133 | args.fid = SMCCC_VERSION; |
| 134 | smc_ret = tftf_smc(&args); |
| 135 | |
| 136 | tftf_testcase_printf("SMCCC Version = %d.%d\n", |
| 137 | (int)((smc_ret.ret0 >> SMCCC_VERSION_MAJOR_SHIFT) & SMCCC_VERSION_MAJOR_MASK), |
| 138 | (int)((smc_ret.ret0 >> SMCCC_VERSION_MINOR_SHIFT) & SMCCC_VERSION_MINOR_MASK)); |
| 139 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 140 | return test_result; |
| 141 | } |
| 142 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 143 | /* Set some variables that are accessible to all CPUs. */ |
| 144 | void test_init(test_result_t (*test_function)(void)) |
| 145 | { |
| 146 | int i; |
| 147 | |
| 148 | for (i = 0; i < PLATFORM_CORE_COUNT; i++) { |
| 149 | test_skipped[i] = false; |
| 150 | tftf_init_event(&cpu_has_entered_test[i]); |
| 151 | } |
| 152 | |
| 153 | asymmetric_test_function = test_function; |
| 154 | |
| 155 | /* Ensure the above writes are seen before any read */ |
| 156 | dmbsy(); |
| 157 | } |
| 158 | |
| 159 | /* |
| 160 | * Run the given test function on all CPUs. If the test is skipped on all CPUs, |
| 161 | * the whole test is skipped. This is checked using the test_skipped array. |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 162 | */ |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 163 | test_result_t run_asymmetric_test(test_result_t (*test_function)(void)) |
Manish Pandey | 0145ec3 | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 164 | { |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 165 | unsigned int lead_mpid; |
| 166 | unsigned int cpu_mpid, cpu_node; |
| 167 | unsigned int core_pos; |
| 168 | int psci_ret; |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 169 | bool all_cpus_skipped; |
| 170 | int i; |
| 171 | uint32_t aff_info; |
| 172 | test_result_t test_result; |
Charlie Bareham | e4f2eaa | 2024-08-12 17:59:54 +0100 | [diff] [blame] | 173 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 174 | lead_mpid = read_mpidr_el1() & MPID_MASK; |
| 175 | |
| 176 | SKIP_TEST_IF_LESS_THAN_N_CPUS(2); |
| 177 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 178 | test_init(test_function); |
| 179 | |
| 180 | /* run test on lead CPU */ |
| 181 | test_result = test_function(); |
| 182 | |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 183 | /* Power on all CPUs */ |
| 184 | for_each_cpu(cpu_node) { |
| 185 | cpu_mpid = tftf_get_mpidr_from_node(cpu_node); |
| 186 | /* Skip lead CPU as it is already powered on */ |
| 187 | if (cpu_mpid == lead_mpid) |
| 188 | continue; |
| 189 | |
| 190 | psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t) non_lead_cpu_fn, 0); |
| 191 | if (psci_ret != PSCI_E_SUCCESS) { |
| 192 | tftf_testcase_printf( |
| 193 | "Failed to power on CPU 0x%x (%d)\n", |
| 194 | cpu_mpid, psci_ret); |
| 195 | return TEST_RESULT_SKIPPED; |
| 196 | } |
| 197 | } |
| 198 | |
| 199 | /* Wait for non-lead CPUs to enter the test */ |
| 200 | for_each_cpu(cpu_node) { |
| 201 | cpu_mpid = tftf_get_mpidr_from_node(cpu_node); |
| 202 | /* Skip lead CPU */ |
| 203 | if (cpu_mpid == lead_mpid) |
| 204 | continue; |
| 205 | |
| 206 | core_pos = platform_get_core_pos(cpu_mpid); |
| 207 | tftf_wait_for_event(&cpu_has_entered_test[core_pos]); |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 208 | } |
| 209 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 210 | /* Wait for all non-lead CPUs to power down */ |
| 211 | for_each_cpu(cpu_node) { |
| 212 | cpu_mpid = tftf_get_mpidr_from_node(cpu_node); |
| 213 | /* Skip lead CPU */ |
| 214 | if (cpu_mpid == lead_mpid) |
| 215 | continue; |
Arvind Ram Prakash | 8191621 | 2024-08-15 15:08:23 -0500 | [diff] [blame] | 216 | |
Charlie Bareham | 9601dc5 | 2024-08-28 17:27:18 +0100 | [diff] [blame^] | 217 | do { |
| 218 | aff_info = tftf_psci_affinity_info(cpu_mpid, |
| 219 | MPIDR_AFFLVL0); |
| 220 | } while (aff_info != PSCI_STATE_OFF); |
| 221 | } |
| 222 | |
| 223 | /* |
| 224 | * If the test was skipped on all CPUs, the whole test should be |
| 225 | * skipped. |
| 226 | */ |
| 227 | |
| 228 | all_cpus_skipped = true; |
| 229 | for (i = 0; i < PLATFORM_CORE_COUNT; i++) { |
| 230 | if (!test_skipped[i]) { |
| 231 | all_cpus_skipped = false; |
| 232 | break; |
| 233 | } |
| 234 | } |
| 235 | |
| 236 | if (all_cpus_skipped) { |
| 237 | return TEST_RESULT_SKIPPED; |
| 238 | } else { |
| 239 | return test_result; |
| 240 | } |
| 241 | } |
| 242 | |
/*
 * Test Asymmetric Support for FEAT_TRBE: runs test_trbe on every CPU via the
 * run_asymmetric_test harness.
 */
test_result_t test_trbe_errata_asymmetric(void)
{
	return run_asymmetric_test(test_trbe);
}
| 248 | |
/*
 * Test Asymmetric Support for FEAT_SPE: runs test_spe on every CPU via the
 * run_asymmetric_test harness.
 */
test_result_t test_spe_asymmetric(void)
{
	return run_asymmetric_test(test_spe);
}