/*
 * Copyright (c) 2024, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
Manish Pandey0145ec32024-08-12 17:59:54 +01006#include <arch.h>
7#include <arch_helpers.h>
8#include <arm_arch_svc.h>
Arvind Ram Prakash81916212024-08-15 15:08:23 -05009#include <events.h>
10#include <plat_topology.h>
11#include <platform.h>
12#include <platform_def.h>
13#include <power_management.h>
14#include <psci.h>
Manish Pandey0145ec32024-08-12 17:59:54 +010015#include <smccc.h>
16#include <sync.h>
Arvind Ram Prakash81916212024-08-15 15:08:23 -050017#include <test_helpers.h>
Manish Pandey0145ec32024-08-12 17:59:54 +010018#include <tftf_lib.h>
Manish Pandey0145ec32024-08-12 17:59:54 +010019
/* Per-CPU event: signalled by a CPU once it has entered the test. */
static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];

/* Used when catching synchronous exceptions. Set per-CPU by the custom
 * sync exception handler when an EC_UNKNOWN exception is taken. */
static volatile bool exception_triggered[PLATFORM_CORE_COUNT];

/*
 * The whole test should only be skipped if the test was skipped on all CPUs.
 * The test on each CPU can't return TEST_RESULT_SKIPPED, because the whole test
 * is skipped if any of the CPUs return TEST_RESULT_SKIPPED. Instead, to skip a
 * test, the test returns TEST_RESULT_SUCCESS, then sets a flag in the
 * test_skipped array. This array is checked at the end by the
 * run_asymmetric_test function.
 */
static volatile bool test_skipped[PLATFORM_CORE_COUNT];

/*
 * Test function which is run on each CPU. It is global so it is visible to all
 * CPUS.
 */
static test_result_t (*asymmetric_test_function)(void);
Arvind Ram Prakash81916212024-08-15 15:08:23 -050040
Charlie Bareham70de3ff2024-08-20 11:27:25 +010041static bool exception_handler(void)
Arvind Ram Prakash81916212024-08-15 15:08:23 -050042{
Charlie Bareham9601dc52024-08-28 17:27:18 +010043 unsigned int mpid = read_mpidr_el1() & MPID_MASK;
44 unsigned int core_pos = platform_get_core_pos(mpid);
45
Arvind Ram Prakash81916212024-08-15 15:08:23 -050046 uint64_t esr_el2 = read_esr_el2();
Charlie Bareham9601dc52024-08-28 17:27:18 +010047
Arvind Ram Prakash81916212024-08-15 15:08:23 -050048 if (EC_BITS(esr_el2) == EC_UNKNOWN) {
Charlie Bareham70de3ff2024-08-20 11:27:25 +010049 /*
50 * This may be an undef injection, or a trap to EL2 due to a
51 * register not being present. Both cases have the same EC
52 * value.
53 */
Charlie Bareham9601dc52024-08-28 17:27:18 +010054 exception_triggered[core_pos] = true;
Arvind Ram Prakash81916212024-08-15 15:08:23 -050055 return true;
56 }
57
58 return false;
59}
60
Charlie Barehame4f2eaa2024-08-12 17:59:54 +010061static test_result_t test_trbe(void)
62{
63 unsigned int mpid = read_mpidr_el1() & MPID_MASK;
64 unsigned int core_pos = platform_get_core_pos(mpid);
Charlie Bareham9601dc52024-08-28 17:27:18 +010065 bool should_trigger_exception = is_trbe_errata_affected_core();
66
67 if (!is_feat_trbe_present()) {
68 test_skipped[core_pos] = true;
69 return TEST_RESULT_SUCCESS;
70 }
Charlie Barehame4f2eaa2024-08-12 17:59:54 +010071
Charlie Bareham70de3ff2024-08-20 11:27:25 +010072 register_custom_sync_exception_handler(exception_handler);
Charlie Bareham9601dc52024-08-28 17:27:18 +010073 exception_triggered[core_pos] = false;
Charlie Barehame4f2eaa2024-08-12 17:59:54 +010074 read_trblimitr_el1();
Charlie Bareham4397e442024-08-20 10:17:38 +010075 unregister_custom_sync_exception_handler();
Charlie Barehame4f2eaa2024-08-12 17:59:54 +010076
Charlie Bareham9601dc52024-08-28 17:27:18 +010077 /**
78 * NOTE: TRBE as an asymmetric feature is as exceptional one.
79 * Even if the hardware supports the feature, TF-A deliberately disables
80 * it at EL3. In this scenario, when the register "TRBLIMITR_EL1" is
81 * accessed, the registered undef injection handler should kick in and
82 * the exception will be handled synchronously at EL2.
83 */
84 if (exception_triggered[core_pos] != should_trigger_exception) {
Charlie Bareham70de3ff2024-08-20 11:27:25 +010085 tftf_testcase_printf("Exception triggered for core = %d "
Charlie Barehame4f2eaa2024-08-12 17:59:54 +010086 "when accessing TRB_LIMTR\n", core_pos);
Charlie Bareham9601dc52024-08-28 17:27:18 +010087 return TEST_RESULT_FAIL;
Charlie Barehame4f2eaa2024-08-12 17:59:54 +010088 }
89
Charlie Bareham9601dc52024-08-28 17:27:18 +010090 return TEST_RESULT_SUCCESS;
Charlie Barehame4f2eaa2024-08-12 17:59:54 +010091}
92
93static test_result_t test_spe(void)
94{
95 unsigned int mpid = read_mpidr_el1() & MPID_MASK;
96 unsigned int core_pos = platform_get_core_pos(mpid);
97
Charlie Bareham9601dc52024-08-28 17:27:18 +010098 /**
99 * NOTE: SPE as an asymmetric feature, we expect to access the
100 * PMSCR_EL1 register, when supported in the hardware.
101 * If the feature isn't supported, we skip the test.
102 * So on each individual CPU, we verify whether the feature's presence
103 * and based on it we access (if feature supported) or skip the test.
104 */
105 if (!is_feat_spe_supported()) {
106 test_skipped[core_pos] = true;
107 return TEST_RESULT_SUCCESS;
Charlie Barehame4f2eaa2024-08-12 17:59:54 +0100108 }
109
Charlie Bareham9601dc52024-08-28 17:27:18 +0100110 read_pmscr_el1();
111
112 return TEST_RESULT_SUCCESS;
Charlie Barehame4f2eaa2024-08-12 17:59:54 +0100113}
114
Jayanth Dodderi Chidanandf2f1e272024-09-03 11:49:51 +0100115static test_result_t test_tcr2(void)
116{
117 unsigned int mpid = read_mpidr_el1() & MPID_MASK;
118 unsigned int core_pos = platform_get_core_pos(mpid);
119
120 if (!is_feat_tcr2_supported()) {
121 test_skipped[core_pos] = true;
122 return TEST_RESULT_SUCCESS;
123 }
124
125 read_tcr2_el1();
126
127 return TEST_RESULT_SUCCESS;
128}
129
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500130/*
Charlie Bareham9601dc52024-08-28 17:27:18 +0100131 * Runs on one CPU, and runs asymmetric_test_function.
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500132 */
133static test_result_t non_lead_cpu_fn(void)
134{
135 unsigned int mpid = read_mpidr_el1() & MPID_MASK;
136 unsigned int core_pos = platform_get_core_pos(mpid);
Charlie Bareham9601dc52024-08-28 17:27:18 +0100137 test_result_t test_result;
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500138
139 /* Signal to the lead CPU that the calling CPU has entered the test */
140 tftf_send_event(&cpu_has_entered_test[core_pos]);
141
Charlie Bareham9601dc52024-08-28 17:27:18 +0100142 test_result = asymmetric_test_function();
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500143
144 /* Ensure that EL3 still functional */
145 smc_args args;
146 smc_ret_values smc_ret;
147 memset(&args, 0, sizeof(args));
148 args.fid = SMCCC_VERSION;
149 smc_ret = tftf_smc(&args);
150
151 tftf_testcase_printf("SMCCC Version = %d.%d\n",
152 (int)((smc_ret.ret0 >> SMCCC_VERSION_MAJOR_SHIFT) & SMCCC_VERSION_MAJOR_MASK),
153 (int)((smc_ret.ret0 >> SMCCC_VERSION_MINOR_SHIFT) & SMCCC_VERSION_MINOR_MASK));
154
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500155 return test_result;
156}
157
Charlie Bareham9601dc52024-08-28 17:27:18 +0100158/* Set some variables that are accessible to all CPUs. */
159void test_init(test_result_t (*test_function)(void))
160{
161 int i;
162
163 for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
164 test_skipped[i] = false;
165 tftf_init_event(&cpu_has_entered_test[i]);
166 }
167
168 asymmetric_test_function = test_function;
169
170 /* Ensure the above writes are seen before any read */
171 dmbsy();
172}
173
174/*
175 * Run the given test function on all CPUs. If the test is skipped on all CPUs,
176 * the whole test is skipped. This is checked using the test_skipped array.
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500177 */
Charlie Bareham9601dc52024-08-28 17:27:18 +0100178test_result_t run_asymmetric_test(test_result_t (*test_function)(void))
Manish Pandey0145ec32024-08-12 17:59:54 +0100179{
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500180 unsigned int lead_mpid;
181 unsigned int cpu_mpid, cpu_node;
182 unsigned int core_pos;
183 int psci_ret;
Charlie Bareham9601dc52024-08-28 17:27:18 +0100184 bool all_cpus_skipped;
185 int i;
186 uint32_t aff_info;
187 test_result_t test_result;
Charlie Barehame4f2eaa2024-08-12 17:59:54 +0100188
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500189 lead_mpid = read_mpidr_el1() & MPID_MASK;
190
191 SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
192
Charlie Bareham9601dc52024-08-28 17:27:18 +0100193 test_init(test_function);
194
195 /* run test on lead CPU */
196 test_result = test_function();
197
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500198 /* Power on all CPUs */
199 for_each_cpu(cpu_node) {
200 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
201 /* Skip lead CPU as it is already powered on */
202 if (cpu_mpid == lead_mpid)
203 continue;
204
205 psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t) non_lead_cpu_fn, 0);
206 if (psci_ret != PSCI_E_SUCCESS) {
207 tftf_testcase_printf(
208 "Failed to power on CPU 0x%x (%d)\n",
209 cpu_mpid, psci_ret);
210 return TEST_RESULT_SKIPPED;
211 }
212 }
213
214 /* Wait for non-lead CPUs to enter the test */
215 for_each_cpu(cpu_node) {
216 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
217 /* Skip lead CPU */
218 if (cpu_mpid == lead_mpid)
219 continue;
220
221 core_pos = platform_get_core_pos(cpu_mpid);
222 tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500223 }
224
Charlie Bareham9601dc52024-08-28 17:27:18 +0100225 /* Wait for all non-lead CPUs to power down */
226 for_each_cpu(cpu_node) {
227 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
228 /* Skip lead CPU */
229 if (cpu_mpid == lead_mpid)
230 continue;
Arvind Ram Prakash81916212024-08-15 15:08:23 -0500231
Charlie Bareham9601dc52024-08-28 17:27:18 +0100232 do {
233 aff_info = tftf_psci_affinity_info(cpu_mpid,
234 MPIDR_AFFLVL0);
235 } while (aff_info != PSCI_STATE_OFF);
236 }
237
238 /*
239 * If the test was skipped on all CPUs, the whole test should be
240 * skipped.
241 */
242
243 all_cpus_skipped = true;
244 for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
245 if (!test_skipped[i]) {
246 all_cpus_skipped = false;
247 break;
248 }
249 }
250
251 if (all_cpus_skipped) {
252 return TEST_RESULT_SKIPPED;
253 } else {
254 return test_result;
255 }
256}
257
/*
 * Test Asymmetric Support for FEAT_TRBE: runs test_trbe on every CPU via
 * the common asymmetric-test driver.
 */
test_result_t test_trbe_errata_asymmetric(void)
{
	return run_asymmetric_test(test_trbe);
}
263
/*
 * Test Asymmetric Support for FEAT_SPE: runs test_spe on every CPU via
 * the common asymmetric-test driver.
 */
test_result_t test_spe_asymmetric(void)
{
	return run_asymmetric_test(test_spe);
}
Jayanth Dodderi Chidanandf2f1e272024-09-03 11:49:51 +0100269
/*
 * Test Asymmetric Support for FEAT_TCR2: runs test_tcr2 on every CPU via
 * the common asymmetric-test driver.
 */
test_result_t test_tcr2_asymmetric(void)
{
	return run_asymmetric_test(test_tcr2);
}