blob: 3af35ad024280fad664358a10986574a43887aee [file] [log] [blame]
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +02001/*
Boyan Karatoteva4b33342025-06-19 16:24:29 +01002 * Copyright (c) 2018-2025, Arm Limited. All rights reserved.
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +02003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch_helpers.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +02008#include <debug.h>
Antonio Nino Diaz09a00ef2019-01-11 13:12:58 +00009#include <drivers/arm/arm_gic.h>
Boyan Karatoteva4b33342025-06-19 16:24:29 +010010#include <drivers/arm/gic_v2v3_common.h>
Antonio Nino Diaz09a00ef2019-01-11 13:12:58 +000011#include <drivers/arm/gic_v2.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020012#include <events.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020013#include <irq.h>
14#include <plat_topology.h>
15#include <platform.h>
16#include <power_management.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020017#include <smccc.h>
18#include <string.h>
19#include <test_helpers.h>
20#include <tftf_lib.h>
21
/* Number of preempt/resume cycles run per CPU in the stress test. */
#define TEST_ITERATIONS_COUNT 1000

/* Wakeup timer period for the SYSTEM_SUSPEND test, in milliseconds. */
#define SUSPEND_TIME_1_SEC 1000

/* Arbitrary operands passed to the TSP arithmetic SMCs. */
#define TEST_VALUE_1 4
#define TEST_VALUE_2 6

/* Events used to synchronise the lead CPU with the secondary CPUs. */
static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
static event_t cpu_has_finished_test[PLATFORM_CORE_COUNT];

/* Per-CPU flags set from IRQ handlers and polled from normal context. */
static volatile int requested_irq_received[PLATFORM_CORE_COUNT];
static volatile int wakeup_irq_received[PLATFORM_CORE_COUNT];
/*
 * NOTE(review): the three variables below are not referenced anywhere in the
 * code visible here — possibly leftovers or used by another file; confirm
 * before removing.
 */
static volatile int individual_test_failed[PLATFORM_CORE_COUNT];
static volatile int pwr_level_being_tested;
static volatile int test_finished_flag;

38/* Dummy timer handler that sets a flag to check it has been called. */
39static int suspend_wakeup_handler(void *data)
40{
41 u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
42 unsigned int core_pos = platform_get_core_pos(core_mpid);
43
44 assert(wakeup_irq_received[core_pos] == 0);
45
46 wakeup_irq_received[core_pos] = 1;
47
48 return 0;
49}
50
51/* Dummy handler that sets a flag so as to check it has been called. */
52static int test_handler(void *data)
53{
54 u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
55 unsigned int core_pos = platform_get_core_pos(core_mpid);
56
57 assert(requested_irq_received[core_pos] == 0);
58
59 requested_irq_received[core_pos] = 1;
60
61 return 0;
62}
63
64/* Register a dummy handler for SGI #0 and enable it. Returns 0 if success. */
65static int register_and_enable_test_sgi_handler(unsigned int core_pos)
66{
67 /* SGIs #0 - #6 are freely available. */
68
Boyan Karatotev6d144db2025-06-23 15:04:53 +010069 int ret = tftf_irq_register_handler_sgi(IRQ_NS_SGI_0, test_handler);
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020070
71 if (ret != 0) {
72 tftf_testcase_printf(
73 "Failed to register SGI handler @ CPU %d (rc = %d)\n",
74 core_pos, ret);
75 return -1;
76 }
77
Boyan Karatotev6d144db2025-06-23 15:04:53 +010078 tftf_irq_enable_sgi(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020079
80 return 0;
81}
82
83/* Disable and unregister the dummy handler for SGI #0. */
84static void unregister_and_disable_test_sgi_handler(void)
85{
Boyan Karatotev6d144db2025-06-23 15:04:53 +010086 tftf_irq_disable_sgi(IRQ_NS_SGI_0);
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020087
Boyan Karatotev6d144db2025-06-23 15:04:53 +010088 tftf_irq_unregister_handler_sgi(IRQ_NS_SGI_0);
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020089}
90
91/*
92 * Generate a pre-empted STD SMC on the CPU who called this function. Steps:
93 * 1. IRQs are disabled.
94 * 2. An SGI is sent to itself. It cannot be handled because IRQs are disabled.
95 * 3. Invoke an STD SMC on the TSP, which is preempted by the pending SGI.
96 * 4. IRQs are enabled, the SGI is handled.
97 * 5. This function is exited with a preempted STD SMC waiting to be resumed.
98 */
99static int preempt_std_smc_on_this_cpu(void)
100{
101 smc_args std_smc_args;
102 smc_ret_values smc_ret;
103
104 int result = TEST_RESULT_SUCCESS;
105 u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
106 unsigned int core_pos = platform_get_core_pos(core_mpid);
107
108 if (register_and_enable_test_sgi_handler(core_pos) != 0) {
109 return TEST_RESULT_FAIL;
110 }
111
112 /* Set PSTATE.I to 0. */
113 disable_irq();
114
115 /*
116 * Send SGI to itself. It can't be handled because the
117 * interrupts are disabled.
118 */
119 requested_irq_received[core_pos] = 0;
120
121 tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
122
123 /*
124 * Invoke an STD SMC. Should be pre-empted because of the SGI
125 * that is waiting.
126 */
Sandrine Bailleux17795062018-12-13 16:02:41 +0100127 std_smc_args.fid = TSP_STD_FID(TSP_ADD);
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200128 std_smc_args.arg1 = TEST_VALUE_1;
129 std_smc_args.arg2 = TEST_VALUE_2;
130 smc_ret = tftf_smc(&std_smc_args);
131 if (smc_ret.ret0 != TSP_SMC_PREEMPTED) {
132 tftf_testcase_printf("SMC @ CPU %d returned 0x%llX.\n", core_pos,
133 (unsigned long long)smc_ret.ret0);
134 result = TEST_RESULT_FAIL;
135 }
136
137 /* Set PSTATE.I to 1. Let the SGI be handled. */
138 enable_irq();
139
140 /* Cleanup. Disable and unregister SGI handler. */
141 unregister_and_disable_test_sgi_handler();
142
143 /*
144 * Check that the SGI has been handled, but don't fail if it hasn't
145 * because there is no guarantee that it will have actually happened at
146 * this point.
147 */
148 if (requested_irq_received[core_pos] == 0) {
149 VERBOSE("SGI not handled @ CPU %d\n", core_pos);
150 }
151
152 return result;
153}
154
155/* Resume a pre-empted STD SMC on the CPU who called this function. */
156static int resume_std_smc_on_this_cpu(void)
157{
158 smc_args std_smc_args;
159 smc_ret_values smc_ret;
160
161 u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
162 unsigned int core_pos = platform_get_core_pos(core_mpid);
163
164 /* Resume the STD SMC. Verify result. */
Sandrine Bailleux17795062018-12-13 16:02:41 +0100165 std_smc_args.fid = TSP_FID_RESUME;
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200166 smc_ret = tftf_smc(&std_smc_args);
167 if ((smc_ret.ret0 != 0) || (smc_ret.ret1 != TEST_VALUE_1 * 2)
168 || (smc_ret.ret2 != TEST_VALUE_2 * 2)) {
169 tftf_testcase_printf(
170 "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
171 core_pos, (unsigned long long)smc_ret.ret0,
172 (unsigned long long)smc_ret.ret1,
173 (unsigned long long)smc_ret.ret2,
174 TEST_VALUE_1 * 2, TEST_VALUE_2 * 2);
175 return TEST_RESULT_FAIL;
176 }
177 return TEST_RESULT_SUCCESS;
178}
179
180/*
181 * Try to resume a pre-empted STD SMC on the CPU who called this function,
182 * but check for SMC_UNKNOWN as a result.
183 */
184static int resume_fail_std_smc_on_this_cpu(void)
185{
186 smc_args std_smc_args;
187 smc_ret_values smc_ret;
188
189 u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
190 unsigned int core_pos = platform_get_core_pos(core_mpid);
191
192 /* Resume the STD SMC. Verify result. */
Sandrine Bailleux17795062018-12-13 16:02:41 +0100193 std_smc_args.fid = TSP_FID_RESUME;
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200194 smc_ret = tftf_smc(&std_smc_args);
195 if (smc_ret.ret0 != SMC_UNKNOWN) {
196 tftf_testcase_printf(
197 "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of SMC_UNKNOWN\n",
198 core_pos, (unsigned long long)smc_ret.ret0,
199 (unsigned long long)smc_ret.ret1,
200 (unsigned long long)smc_ret.ret2);
201 return TEST_RESULT_FAIL;
202 }
203 return TEST_RESULT_SUCCESS;
204}
205
206/*******************************************************************************
207 * Test pre-emption during STD SMCs.
208 ******************************************************************************/
209
210/* Test routine for test_irq_preempted_std_smc. */
211static test_result_t test_irq_preempted_std_smc_fn(void)
212{
213 u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
214 unsigned int core_pos = platform_get_core_pos(cpu_mpid);
215
216 tftf_send_event(&cpu_has_entered_test[core_pos]);
217
218 for (unsigned int i = 0; i < TEST_ITERATIONS_COUNT; i++) {
219
220 if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS)
221 return TEST_RESULT_FAIL;
222
223 if (resume_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS)
224 return TEST_RESULT_FAIL;
225 }
226
227 return TEST_RESULT_SUCCESS;
228}
229
230/*
231 * @Test_Aim@ Multicore preemption test. Tests IRQ preemption during STD SMC
232 * from multiple cores. Uses an SGI to trigger the preemption. TSP should be
233 * present.
234 *
235 * Steps: 1. Invoke Standard SMC on the TSP and try to preempt it via IRQ.
236 * 2. Resume the preempted SMC and verify the result.
237 *
238 * Returns SUCCESS if above 2 steps are performed correctly in every CPU else
239 * failure.
240 */
241test_result_t test_irq_preempted_std_smc(void)
242{
243 u_register_t cpu_mpid;
244 unsigned int cpu_node, core_pos;
245 int psci_ret;
246 u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
247
248 SKIP_TEST_IF_TSP_NOT_PRESENT();
249
250 for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
251 tftf_init_event(&cpu_has_entered_test[i]);
252 }
253
254 /* Power on all CPUs */
255 for_each_cpu(cpu_node) {
256 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
257 /* Skip lead CPU as it is already powered on */
258 if (cpu_mpid == lead_mpid) {
259 continue;
260 }
261
262 core_pos = platform_get_core_pos(cpu_mpid);
263
264 psci_ret = tftf_cpu_on(cpu_mpid,
265 (uintptr_t)test_irq_preempted_std_smc_fn, 0);
266 if (psci_ret != PSCI_E_SUCCESS) {
267 tftf_testcase_printf(
268 "Failed to power on CPU %d (rc = %d)\n",
269 core_pos, psci_ret);
270 return TEST_RESULT_FAIL;
271 }
272 }
273
274 /* Wait until all CPUs have started the test. */
275 for_each_cpu(cpu_node) {
276 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
277 /* Skip lead CPU */
278 if (cpu_mpid == lead_mpid) {
279 continue;
280 }
281
282 core_pos = platform_get_core_pos(cpu_mpid);
283 tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
284 }
285
286 /* Enter the test on lead CPU and return the result. */
287 return test_irq_preempted_std_smc_fn();
288}
289
290/*
291 * Test routine for non-lead CPUs for test_resume_preempted_std_smc_other_cpus.
292 */
293static test_result_t test_resume_preempted_std_smc_other_cpus_non_lead_fn(void)
294{
295 test_result_t result = TEST_RESULT_SUCCESS;
296
297 u_register_t mpid = read_mpidr_el1() & MPID_MASK;
298 unsigned int core_pos = platform_get_core_pos(mpid);
299
300 /*
301 * Try to resume the STD SMC invoked from the lead CPU. It shouldn't be
302 * able to do it.
303 */
304
305 smc_args std_smc_args;
Sandrine Bailleux17795062018-12-13 16:02:41 +0100306 std_smc_args.fid = TSP_FID_RESUME;
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200307 smc_ret_values smc_ret = tftf_smc(&std_smc_args);
308 if (smc_ret.ret0 != SMC_UNKNOWN) {
309 tftf_testcase_printf(
310 "SMC @ lead CPU returned 0x%llX 0x%llX 0x%llX instead of SMC_UNKNOWN\n",
311 (unsigned long long)smc_ret.ret0,
312 (unsigned long long)smc_ret.ret1,
313 (unsigned long long)smc_ret.ret2);
314 result = TEST_RESULT_FAIL;
315 }
316
317 /* Signal to the lead CPU that the calling CPU has finished the test */
318 tftf_send_event(&cpu_has_finished_test[core_pos]);
319
320 return result;
321}
322
323/*
324 * @Test_Aim@ Multicore preemption test. For a MP Secure Payload, the
325 * pre-emption on one CPU should not affect the other CPU. Trying to resume
326 * one STD SMC that was preempted on one CPU shouldn't be possible from any
327 * other CPU.
328 *
329 * Steps: 1. Issue Standard SMC and try preempting it via IRQ on lead CPU.
330 * 2. Try to resume it from the rest of the CPUs sequentially.
331 * 3. Resume the preempted SMC from the lead CPU and verify the result.
332 *
333 * Returns SUCCESS if step 2 fails and steps 1 and 3 succeed, else failure.
334 */
335test_result_t test_resume_preempted_std_smc_other_cpus(void)
336{
337 int i;
338 u_register_t cpu_mpid;
339 unsigned int cpu_node, core_pos;
340 int psci_ret;
341
342 u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
343
344 SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
345
346 SKIP_TEST_IF_TSP_NOT_PRESENT();
347
348 /*
349 * Invoke a STD SMC that will be pre-empted.
350 */
351 if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
352 return TEST_RESULT_FAIL;
353 }
354
355 /*
356 * Try to resume the STD SMC from the rest of CPUs. It shouldn't be
357 * possible.
358 */
359
360 for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
361 tftf_init_event(&cpu_has_finished_test[i]);
362 }
363
364 /* Power on all CPUs and perform test sequentially. */
365 for_each_cpu(cpu_node) {
366 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
367 /* Skip lead CPU as it's the one with the pre-empted STD SMC. */
368 if (cpu_mpid == lead_mpid) {
369 continue;
370 }
371
372 core_pos = platform_get_core_pos(cpu_mpid);
373
374 psci_ret = tftf_cpu_on(cpu_mpid,
375 (uintptr_t)test_resume_preempted_std_smc_other_cpus_non_lead_fn, 0);
376 if (psci_ret != PSCI_E_SUCCESS) {
377 tftf_testcase_printf(
378 "Failed to power on CPU %d (rc = %d)\n",
379 core_pos, psci_ret);
380 return TEST_RESULT_FAIL;
381 }
382
383 /* Wait until the test is finished to begin with the next CPU. */
384 tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
385 }
386
387 /*
388 * Try to resume the STD SMC from the lead CPU. It should be able to do
389 * it and to return the correct result.
390 */
391 return resume_std_smc_on_this_cpu();
392}
393
394/* Test routine for secondary CPU for test_resume_different_cpu_preempted_std_smc */
395static test_result_t test_resume_different_cpu_preempted_std_smc_non_lead_fn(void)
396{
397 smc_args std_smc_args;
398 smc_ret_values smc_ret;
399
400 u_register_t mpid = read_mpidr_el1() & MPID_MASK;
401 unsigned int core_pos = platform_get_core_pos(mpid);
402
403 /* Signal to the lead CPU that the calling CPU has entered the test */
404 tftf_send_event(&cpu_has_entered_test[core_pos]);
405
406 /* Register and enable SGI. SGIs #0 - #6 are freely available. */
407 if (register_and_enable_test_sgi_handler(core_pos) != 0) {
408 /* Signal to the lead CPU that the calling CPU has finished */
409 tftf_send_event(&cpu_has_finished_test[core_pos]);
410 return TEST_RESULT_FAIL;
411 }
412
413 /* Set PSTATE.I to 0. */
414 disable_irq();
415
416 /*
417 * Send SGI to itself. It can't be handled because the interrupts are
418 * disabled.
419 */
420 requested_irq_received[core_pos] = 0;
421
422 tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
423
424 /*
425 * Invoke an STD SMC. Should be pre-empted because of the SGI that is
426 * waiting. It has to be different than the one invoked from the lead
427 * CPU.
428 */
Sandrine Bailleux17795062018-12-13 16:02:41 +0100429 std_smc_args.fid = TSP_STD_FID(TSP_MUL);
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200430 std_smc_args.arg1 = TEST_VALUE_1;
431 std_smc_args.arg2 = TEST_VALUE_2;
432 smc_ret = tftf_smc(&std_smc_args);
433 if (smc_ret.ret0 != TSP_SMC_PREEMPTED) {
434 tftf_testcase_printf(
435 "SMC @ CPU %d returned 0x%llX instead of TSP_SMC_PREEMPTED.\n",
436 core_pos, (unsigned long long)smc_ret.ret0);
437 enable_irq();
438 unregister_and_disable_test_sgi_handler();
439 /* Signal to the lead CPU that the calling CPU has finished */
440 tftf_send_event(&cpu_has_finished_test[core_pos]);
441 return TEST_RESULT_FAIL;
442 }
443
444 /* Set PSTATE.I to 1. Let the SGI be handled. */
445 enable_irq();
446
447 /* Cleanup. Disable and unregister SGI handler. */
448 unregister_and_disable_test_sgi_handler();
449
450 /*
451 * Check that the SGI has been handled, but don't fail if it hasn't
452 * because there is no guarantee that it will have actually happened at
453 * this point.
454 */
455 if (requested_irq_received[core_pos] == 0) {
456 VERBOSE("SGI not handled @ CPU %d\n", core_pos);
457 }
458
459 /* Resume the STD SMC. Verify result. */
Sandrine Bailleux17795062018-12-13 16:02:41 +0100460 std_smc_args.fid = TSP_FID_RESUME;
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200461 smc_ret = tftf_smc(&std_smc_args);
462 if ((smc_ret.ret0 != 0) || (smc_ret.ret1 != TEST_VALUE_1*TEST_VALUE_1)
463 || (smc_ret.ret2 != TEST_VALUE_2*TEST_VALUE_2)) {
464 tftf_testcase_printf(
465 "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
466 core_pos, (unsigned long long)smc_ret.ret0,
467 (unsigned long long)smc_ret.ret1,
468 (unsigned long long)smc_ret.ret2,
469 TEST_VALUE_1*2, TEST_VALUE_2*2);
470 /* Signal to the lead CPU that the calling CPU has finished */
471 tftf_send_event(&cpu_has_finished_test[core_pos]);
472 return TEST_RESULT_FAIL;
473 }
474
475 /* Try to resume the lead CPU STD SMC. Verify result. */
Sandrine Bailleux17795062018-12-13 16:02:41 +0100476 std_smc_args.fid = TSP_FID_RESUME;
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200477 smc_ret = tftf_smc(&std_smc_args);
478 if (smc_ret.ret0 != SMC_UNKNOWN) {
479 tftf_testcase_printf(
480 "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of SMC_UNKNOWN\n",
481 core_pos, (unsigned long long)smc_ret.ret0,
482 (unsigned long long)smc_ret.ret1,
483 (unsigned long long)smc_ret.ret2);
484 /* Signal to the lead CPU that the calling CPU has finished */
485 tftf_send_event(&cpu_has_finished_test[core_pos]);
486 return TEST_RESULT_FAIL;
487 }
488
489 /* Signal to the lead CPU that the calling CPU has finished the test */
490 tftf_send_event(&cpu_has_finished_test[core_pos]);
491 return TEST_RESULT_SUCCESS;
492}
493
494/*
495 * @Test_Aim@ Multicore preemption test. For a MP Secure Payload, the
496 * pre-emption on one CPU should not affect the other CPU. Trying to resume
497 * one STD SMC pre-empted on one CPU shouldn't be possible from any other CPU
498 * involved in the test, and the STD SMC that is resumed from each CPU should
499 * be the same one that was invoked from it.
500 *
501 * Steps: 1. Lead and secondary CPUs set different preempted STD SMCs.
502 * 2. Resume the preempted SMC from secondary CPU. Verify the result.
503 * 3. Try to resume again to check if it can resume the lead SMC.
504 * 4. Resume the preempted SMC from lead CPU. Verify the result.
505 *
506 * Returns SUCCESS if steps 1, 2 and 4 succeed and step 3 fails, else failure.
507 */
508test_result_t test_resume_different_cpu_preempted_std_smc(void)
509{
510 smc_args std_smc_args;
511 smc_ret_values smc_ret;
512 u_register_t cpu_mpid;
513 unsigned int core_pos;
514 int psci_ret;
515
516 u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
517 unsigned int lead_pos = platform_get_core_pos(lead_mpid);
518
519 SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
520
521 SKIP_TEST_IF_TSP_NOT_PRESENT();
522
523 /*
524 * Generate a SGI on the lead CPU that can't be handled because the
525 * interrupts are disabled.
526 */
527 register_and_enable_test_sgi_handler(lead_mpid);
528 disable_irq();
529
530 requested_irq_received[lead_pos] = 0;
531
532 tftf_send_sgi(IRQ_NS_SGI_0, lead_pos);
533
534 /*
535 * Invoke an STD SMC. Should be pre-empted because of the SGI that is
536 * waiting.
537 */
Sandrine Bailleux17795062018-12-13 16:02:41 +0100538 std_smc_args.fid = TSP_STD_FID(TSP_ADD);
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200539 std_smc_args.arg1 = TEST_VALUE_1;
540 std_smc_args.arg2 = TEST_VALUE_2;
541 smc_ret = tftf_smc(&std_smc_args);
542 if (smc_ret.ret0 != TSP_SMC_PREEMPTED) {
543 tftf_testcase_printf(
544 "SMC @ lead CPU returned 0x%llX instead of TSP_SMC_PREEMPTED.\n",
545 (unsigned long long)smc_ret.ret0);
546 enable_irq();
547 unregister_and_disable_test_sgi_handler();
548 return TEST_RESULT_FAIL;
549 }
550
551 /* Set PSTATE.I to 1. Let the SGI be handled. */
552 enable_irq();
553
554 /* Cleanup. Disable and unregister SGI handler. */
555 unregister_and_disable_test_sgi_handler();
556
557 /*
558 * Check that the SGI has been handled, but don't fail if it hasn't
559 * because there is no guarantee that it will have actually happened at
560 * this point.
561 */
562 if (requested_irq_received[lead_pos] == 0) {
563 VERBOSE("SGI not handled @ lead CPU.\n");
564 }
565
566 /* Generate a preempted SMC in a secondary CPU. */
567 cpu_mpid = tftf_find_any_cpu_other_than(lead_mpid);
568 if (cpu_mpid == INVALID_MPID) {
569 tftf_testcase_printf("Couldn't find another CPU.\n");
570 return TEST_RESULT_FAIL;
571 }
572
573 core_pos = platform_get_core_pos(cpu_mpid);
574 tftf_init_event(&cpu_has_finished_test[core_pos]);
575
576 psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t)
577 test_resume_different_cpu_preempted_std_smc_non_lead_fn, 0);
578 if (psci_ret != PSCI_E_SUCCESS) {
579 tftf_testcase_printf("Failed to power on CPU %d (rc = %d)\n",
580 core_pos, psci_ret);
581 return TEST_RESULT_FAIL;
582 }
583
584 /* Wait until the test is finished to continue. */
585 tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
586
587 /*
588 * Try to resume the STD SMC from the lead CPU. It should be able resume
589 * the one it generated before and to return the correct result.
590 */
Sandrine Bailleux17795062018-12-13 16:02:41 +0100591 std_smc_args.fid = TSP_FID_RESUME;
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200592 smc_ret = tftf_smc(&std_smc_args);
593 if ((smc_ret.ret0 != 0) || (smc_ret.ret1 != TEST_VALUE_1 * 2) ||
594 (smc_ret.ret2 != TEST_VALUE_2 * 2)) {
595 tftf_testcase_printf(
596 "SMC @ lead CPU returned 0x%llX 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
597 (unsigned long long)smc_ret.ret0,
598 (unsigned long long)smc_ret.ret1,
599 (unsigned long long)smc_ret.ret2,
600 TEST_VALUE_1*2, TEST_VALUE_2*2);
601 return TEST_RESULT_FAIL;
602 }
603
604 return TEST_RESULT_SUCCESS;
605}
606
607/*******************************************************************************
608 * Test PSCI APIs while preempted.
609 ******************************************************************************/
610
611/*
612 * First part of the test routine for test_psci_cpu_on_off_preempted.
613 * Prepare a pre-empted STD SMC.
614 */
615static test_result_t test_psci_cpu_on_off_preempted_non_lead_fn_1(void)
616{
617 test_result_t result = TEST_RESULT_SUCCESS;
618
619 u_register_t mpid = read_mpidr_el1() & MPID_MASK;
620 unsigned int core_pos = platform_get_core_pos(mpid);
621
622 if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
623 return TEST_RESULT_FAIL;
624 }
625
626 /*
627 * Signal to the lead CPU that the calling CPU has entered the test
628 * conditions for the second part.
629 */
630 tftf_send_event(&cpu_has_entered_test[core_pos]);
631
632 /*
633 * Now this CPU has to be turned off. Since this is not a lead CPU, it
634 * will be done in run_tests(). If it was done here, cpus_cnt wouldn't
635 * decrement and the tftf would think there is still a CPU running, so
636 * it wouldn't finish.
637 *
638 * The result will be overwritten when the second part of the test is
639 * executed.
640 */
641 return result;
642}
643
644/*
645 * Second part of the test routine for test_psci_cpu_on_off_preempted.
646 * Try to resume the previously pre-empted STD SMC.
647 */
648static test_result_t test_psci_cpu_on_off_preempted_non_lead_fn_2(void)
649{
650 test_result_t result;
651
652 u_register_t mpid = read_mpidr_el1() & MPID_MASK;
653 unsigned int core_pos = platform_get_core_pos(mpid);
654
655 /* Try to resume the STD SMC. Check that it fails. */
656 result = resume_fail_std_smc_on_this_cpu();
657
658 /* Signal to the lead CPU that the calling CPU has finished the test */
659 tftf_send_event(&cpu_has_finished_test[core_pos]);
660
661 return result;
662}
663
664/*
665 * @Test_Aim@ Resume preempted STD SMC after PSCI CPU OFF/ON cycle.
666 *
667 * Steps: 1. Each CPU sets a preempted STD SMC.
668 * 2. They send an event to the lead CPU and call PSCI CPU OFF.
669 * 3. The lead CPU invokes PSCI CPU ON for the secondaries (warm boot).
670 * 4. Try to resume the preempted STD SMC on secondary CPUs.
671 *
672 * Returns SUCCESS if steps 1, 2 or 3 succeed and step 4 fails, else failure.
673 */
674test_result_t test_psci_cpu_on_off_preempted_std_smc(void)
675{
676 int i;
677 int all_powered_down;
678 u_register_t cpu_mpid;
679 unsigned int cpu_node, core_pos;
680 int psci_ret;
681 u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
682
683 SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
684
685 SKIP_TEST_IF_TSP_NOT_PRESENT();
686
687 for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
688 tftf_init_event(&cpu_has_entered_test[i]);
689 tftf_init_event(&cpu_has_finished_test[i]);
690 }
691
692 /* Power on all CPUs */
693 for_each_cpu(cpu_node) {
694 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
695 /* Skip lead CPU as it is already powered on */
696 if (cpu_mpid == lead_mpid) {
697 continue;
698 }
699
700 core_pos = platform_get_core_pos(cpu_mpid);
701
702 psci_ret = tftf_cpu_on(cpu_mpid,
703 (uintptr_t)test_psci_cpu_on_off_preempted_non_lead_fn_1, 0);
704 if (psci_ret != PSCI_E_SUCCESS) {
705 tftf_testcase_printf("Failed to power on CPU %d (rc = %d)\n",
706 core_pos, psci_ret);
707 return TEST_RESULT_FAIL;
708 }
709 }
710
711 /* Wait for non-lead CPUs to exit the first part of the test */
712 for_each_cpu(cpu_node) {
713 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
714 /* Skip lead CPU */
715 if (cpu_mpid == lead_mpid) {
716 continue;
717 }
718
719 core_pos = platform_get_core_pos(cpu_mpid);
720 tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
721 }
722
723 /* Check that all secondary CPUs are powered off. */
724 all_powered_down = 0;
725 while (all_powered_down == 0) {
726 all_powered_down = 1;
727 for_each_cpu(cpu_node) {
728 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
729 if (cpu_mpid == lead_mpid) {
730 continue;
731 }
732 if (tftf_is_cpu_online(cpu_mpid) != 0) {
733 all_powered_down = 0;
734 }
735 }
736 }
737
738 /* Start the second part of the test */
739 for_each_cpu(cpu_node) {
740 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
741 /* Skip lead CPU as it is already powered on */
742 if (cpu_mpid == lead_mpid) {
743 continue;
744 }
745
746 core_pos = platform_get_core_pos(cpu_mpid);
747
748 psci_ret = tftf_cpu_on(cpu_mpid,
749 (uintptr_t)test_psci_cpu_on_off_preempted_non_lead_fn_2, 0);
750 if (psci_ret != PSCI_E_SUCCESS) {
751 tftf_testcase_printf("Failed to power on CPU 0x%x (rc = %d)\n",
752 core_pos, psci_ret);
753 return TEST_RESULT_FAIL;
754 }
755 }
756
757 /* Wait for non-lead CPUs to finish the second part of the test. */
758 for_each_cpu(cpu_node) {
759 cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
760 /* Skip lead CPU */
761 if (cpu_mpid == lead_mpid) {
762 continue;
763 }
764
765 core_pos = platform_get_core_pos(cpu_mpid);
766 tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
767 }
768
769 return TEST_RESULT_SUCCESS;
770}
771
772/******************************************************************************/
773
774/*
775 * @Test_Aim@ Resume preempted STD SMC after PSCI SYSTEM SUSPEND (in case it is
776 * supported).
777 *
778 * Steps: 1. The lead CPU sets a preempted STD SMC.
779 * 2. It calls PSCI SYSTEM SUSPEND with a wakeup timer for 1 sec.
780 * 3. Try to resume the preempted STD SMC.
781 *
782 * Returns SUCCESS if steps 1 and 2 succeed and step 3 fails.
783 */
784test_result_t test_psci_system_suspend_preempted_std_smc(void)
785{
786 int psci_ret;
787 int result = TEST_RESULT_SUCCESS;
788
789 u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
790 unsigned int lead_pos = platform_get_core_pos(lead_mpid);
791
792 SKIP_TEST_IF_TSP_NOT_PRESENT();
793
794 if (!is_psci_sys_susp_supported()) {
795 tftf_testcase_printf(
796 "SYSTEM_SUSPEND is not supported.\n");
797 return TEST_RESULT_SKIPPED;
798 }
799
800 if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
801 return TEST_RESULT_FAIL;
802 }
803
804 if (!is_sys_suspend_state_ready()) {
805 result = TEST_RESULT_FAIL;
806 }
807
808 /* Prepare wakeup timer. IRQs need to be enabled. */
809 wakeup_irq_received[lead_pos] = 0;
810
811 tftf_timer_register_handler(suspend_wakeup_handler);
812
813 /* Program timer to fire interrupt after timer expires */
814 tftf_program_timer(SUSPEND_TIME_1_SEC);
815
816 /* Issue PSCI_SYSTEM_SUSPEND. */
817 psci_ret = tftf_system_suspend();
818
819 while (!wakeup_irq_received[lead_pos])
820 ;
821
822 if (psci_ret != PSCI_E_SUCCESS) {
823 mp_printf("SYSTEM_SUSPEND from lead CPU failed. ret: 0x%x\n",
824 psci_ret);
825 result = TEST_RESULT_FAIL;
826 }
827
828 /* Remove timer after waking up.*/
829 tftf_cancel_timer();
830 tftf_timer_unregister_handler();
831
832 if (resume_fail_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
833 result = TEST_RESULT_FAIL;
834 }
835
836 return result;
837}