Trusted Firmware-A Tests, version 2.0
This is the first public version of the tests for the Trusted
Firmware-A project. Please see the documentation provided in the
source tree for more details.
Change-Id: I6f3452046a1351ac94a71b3525c30a4ca8db7867
Signed-off-by: Sandrine Bailleux <sandrine.bailleux@arm.com>
Co-authored-by: amobal01 <amol.balasokamble@arm.com>
Co-authored-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
Co-authored-by: Asha R <asha.r@arm.com>
Co-authored-by: Chandni Cherukuri <chandni.cherukuri@arm.com>
Co-authored-by: David Cunado <david.cunado@arm.com>
Co-authored-by: Dimitris Papastamos <dimitris.papastamos@arm.com>
Co-authored-by: Douglas Raillard <douglas.raillard@arm.com>
Co-authored-by: dp-arm <dimitris.papastamos@arm.com>
Co-authored-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
Co-authored-by: Jonathan Wright <jonathan.wright@arm.com>
Co-authored-by: Kévin Petit <kevin.petit@arm.com>
Co-authored-by: Roberto Vargas <roberto.vargas@arm.com>
Co-authored-by: Sathees Balya <sathees.balya@arm.com>
Co-authored-by: Shawon Roy <Shawon.Roy@arm.com>
Co-authored-by: Soby Mathew <soby.mathew@arm.com>
Co-authored-by: Thomas Abraham <thomas.abraham@arm.com>
Co-authored-by: Vikram Kanigiri <vikram.kanigiri@arm.com>
Co-authored-by: Yatharth Kochar <yatharth.kochar@arm.com>
diff --git a/tftf/framework/aarch32/arch.c b/tftf/framework/aarch32/arch.c
new file mode 100644
index 0000000..763ea1a
--- /dev/null
+++ b/tftf/framework/aarch32/arch.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <tftf.h>
+
+
+void tftf_arch_setup(void)
+{
+ if (!IS_IN_HYP())
+ panic();
+
+ write_hcr(HCR_TGE_BIT);
+}
diff --git a/tftf/framework/aarch32/asm_debug.S b/tftf/framework/aarch32/asm_debug.S
new file mode 100644
index 0000000..d2b2c79
--- /dev/null
+++ b/tftf/framework/aarch32/asm_debug.S
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+#if ENABLE_ASSERTIONS
+
+ .globl asm_assert
+
+/* Since the max supported decimal input number is 65535 */
+#define MAX_DEC_DIVISOR 10000
+
+/* The offset to add to get ascii for numerals '0 - 9' */
+#define ASCII_OFFSET_NUM '0'
+
+.section .rodata.assert_str, "aS"
+assert_msg1:
+ .asciz "ASSERT: File "
+assert_msg2:
+ .asciz " Line "
+
+/* ---------------------------------------------------------------------------
+ * Assertion support in assembly.
+ * The below function helps to support assertions in assembly where we do not
+ * have a C runtime stack. Arguments to the function are :
+ * r0 - File name
+ * r1 - Line no
+ * Clobber list : lr, r0 - r6
+ * ---------------------------------------------------------------------------
+ */
+func asm_assert
+ /*
+ * Only print the output if LOG_LEVEL is higher or equal to
+ * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
+ */
+ /* Stash the parameters already in r0 and r1 */
+ mov r5, r0
+ mov r6, r1
+
+ /* Initialize crash console and verify success */
+ bl plat_crash_console_init
+ cmp r0, #0
+ beq 1f
+
+ /* Print file name */
+ ldr r4, =assert_msg1
+ bl asm_print_str
+ mov r4, r5
+ bl asm_print_str
+
+ /* Print line number string */
+ ldr r4, =assert_msg2
+ bl asm_print_str
+
+ /* Test for maximum supported line number */
+ ldr r4, =~0xffff
+ tst r6, r4
+ bne 1f
+ mov r4, r6
+
+ /* Print line number in decimal */
+ mov r6, #10 /* Divide by 10 after every loop iteration */
+ ldr r5, =MAX_DEC_DIVISOR
+dec_print_loop:
+ udiv r0, r4, r5 /* Quotient */
+ mls r4, r0, r5, r4 /* Remainder */
+ add r0, r0, #ASCII_OFFSET_NUM /* Convert to ASCII */
+ bl plat_crash_console_putc
+ udiv r5, r5, r6 /* Reduce divisor */
+ cmp r5, #0
+ bne dec_print_loop
+
+ bl plat_crash_console_flush
+
+1:
+ wfi
+ b 1b
+endfunc asm_assert
+
+/*
+ * This function prints a string from address in r4
+ * Clobber: lr, r0 - r4
+ */
+func asm_print_str
+ mov r3, lr
+1:
+ ldrb r0, [r4], #0x1
+ cmp r0, #0
+ beq 2f
+ bl plat_crash_console_putc
+ b 1b
+2:
+ bx r3
+endfunc asm_print_str
+
+/*
+ * This function prints a hexadecimal number in r4.
+ * In: r4 = the hexadecimal to print.
+ * Clobber: lr, r0 - r3, r5
+ */
+func asm_print_hex
+ mov r3, lr
+ mov r5, #32 /* No of bits to convert to ascii */
+1:
+ sub r5, r5, #4
+ lsr r0, r4, r5
+ and r0, r0, #0xf
+ cmp r0, #0xa
+ blo 2f
+ /* Add by 0x27 in addition to ASCII_OFFSET_NUM
+ * to get ascii for characters 'a - f'.
+ */
+ add r0, r0, #0x27
+2:
+ add r0, r0, #ASCII_OFFSET_NUM
+ bl plat_crash_console_putc
+ cmp r5, #0
+ bne 1b
+ bx r3
+endfunc asm_print_hex
+
+#endif /* ENABLE_ASSERTIONS */
diff --git a/tftf/framework/aarch32/entrypoint.S b/tftf/framework/aarch32/entrypoint.S
new file mode 100644
index 0000000..04a7d4c
--- /dev/null
+++ b/tftf/framework/aarch32/entrypoint.S
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <tftf.h>
+
+ .globl tftf_entrypoint
+ .globl tftf_hotplug_entry
+
+/* ----------------------------------------------------------------------------
+ * Cold boot entry point for the primary CPU.
+ * ----------------------------------------------------------------------------
+ */
+func tftf_entrypoint
+ /* --------------------------------------------------------------------
+ * Set the exception vectors
+ * --------------------------------------------------------------------
+ */
+ ldr r0, =tftf_vector
+ stcopr r0, HVBAR
+
+ /* --------------------------------------------------------------------
+ * Enable the instruction cache and asynchronous interrupts.
+ * --------------------------------------------------------------------
+ */
+ ldcopr r0, HSCTLR
+ ldr r1, =(HSCTLR_I_BIT | HSCTLR_A_BIT)
+ orr r0, r0, r1
+ stcopr r0, HSCTLR
+ isb
+
+ /* --------------------------------------------------------------------
+ * This code is expected to be executed only by the primary CPU.
+ * Save the mpid for the first core that executes and if a secondary
+ * CPU has lost its way make it spin forever.
+ * --------------------------------------------------------------------
+ */
+ bl save_primary_mpid
+
+ /* --------------------------------------------------------------------
+ * Zero out NOBITS sections. There are 2 of them:
+ * - the .bss section;
+ * - the coherent memory section.
+ * --------------------------------------------------------------------
+ */
+ ldr r0, =__BSS_START__
+ ldr r1, =__BSS_SIZE__
+ bl zeromem
+
+ ldr r0, =__COHERENT_RAM_START__
+ ldr r1, =__COHERENT_RAM_UNALIGNED_SIZE__
+ bl zeromem
+
+ /* --------------------------------------------------------------------
+ * Give ourselves a small coherent stack to ease the pain of
+ * initializing the MMU
+ * --------------------------------------------------------------------
+ */
+ ldcopr r0, MPIDR
+ bl platform_set_coherent_stack
+
+ bl tftf_early_platform_setup
+ bl tftf_plat_arch_setup
+
+ /* --------------------------------------------------------------------
+ * Give ourselves a stack allocated in Normal -IS-WBWA memory
+ * --------------------------------------------------------------------
+ */
+ ldcopr r0, MPIDR
+ bl platform_set_stack
+
+ /* --------------------------------------------------------------------
+ * tftf_cold_boot_main() will perform the remaining architectural and
+ * platform setup, initialise the test framework's state, then run the
+ * tests.
+ * --------------------------------------------------------------------
+ */
+ b tftf_cold_boot_main
+endfunc tftf_entrypoint
+
+/* ----------------------------------------------------------------------------
+ * Entry point for a CPU that has just been powered up.
+ * In : r0 - context_id
+ * ----------------------------------------------------------------------------
+ */
+func tftf_hotplug_entry
+
+ /* --------------------------------------------------------------------
+ * Preserve the context_id in a callee-saved register
+ * --------------------------------------------------------------------
+ */
+ mov r4, r0
+
+ /* --------------------------------------------------------------------
+ * Set the exception vectors
+ * --------------------------------------------------------------------
+ */
+ ldr r0, =tftf_vector
+ stcopr r0, HVBAR
+
+ /* --------------------------------------------------------------------
+ * Enable the instruction cache and asynchronous interrupts.
+ * --------------------------------------------------------------------
+ */
+ ldcopr r0, HSCTLR
+ ldr r1, =(HSCTLR_I_BIT | HSCTLR_A_BIT)
+ orr r0, r0, r1
+ stcopr r0, HSCTLR
+ isb
+
+ /* --------------------------------------------------------------------
+ * Give ourselves a small coherent stack to ease the pain of
+ * initializing the MMU
+ * --------------------------------------------------------------------
+ */
+ ldcopr r0, MPIDR
+ bl platform_set_coherent_stack
+
+ /* --------------------------------------------------------------------
+ * Enable the MMU
+ * --------------------------------------------------------------------
+ */
+ bl tftf_plat_enable_mmu
+
+ /* --------------------------------------------------------------------
+ * Give ourselves a stack in normal memory.
+ * --------------------------------------------------------------------
+ */
+ ldcopr r0, MPIDR
+ bl platform_set_stack
+
+ /* --------------------------------------------------------------------
+ * Save the context_id for later retrieval by tests
+ * --------------------------------------------------------------------
+ */
+ ldcopr r0, MPIDR
+ ldr r1, =MPID_MASK
+ and r0, r0, r1
+ bl platform_get_core_pos
+
+ mov r1, r4
+
+ bl tftf_set_cpu_on_ctx_id
+
+ /* --------------------------------------------------------------------
+ * Jump to warm boot main function
+ * --------------------------------------------------------------------
+ */
+ b tftf_warm_boot_main
+endfunc tftf_hotplug_entry
+
+/* ----------------------------------------------------------------------------
+ * Saves the mpid of the primary core and if the primary core
+ * is already saved then it loops infinitely.
+ * ----------------------------------------------------------------------------
+ */
+func save_primary_mpid
+ ldr r1, =tftf_primary_core
+ ldr r0, [r1]
+ mov r2, #INVALID_MPID
+ cmp r0, r2
+ bne panic
+ ldr r2, =MPID_MASK
+ ldcopr r0, MPIDR
+ and r0, r0, r2
+ str r0, [r1]
+ bx lr
+panic:
+ /* Primary core MPID already saved */
+ b panic
+endfunc save_primary_mpid
diff --git a/tftf/framework/aarch32/exceptions.S b/tftf/framework/aarch32/exceptions.S
new file mode 100644
index 0000000..1e6c574
--- /dev/null
+++ b/tftf/framework/aarch32/exceptions.S
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <tftf.h>
+
+ .globl tftf_vector
+
+vector_base tftf_vector
+ b tftf_entrypoint
+ b . /* Undef */
+ b . /* Syscall */
+ b . /* Prefetch abort */
+ b . /* Data abort */
+ b . /* Hyp trap */
+	b	tftf_intr_handle	/* IRQ */
+ b . /* FIQ */
+
+/* ----------------------------------------------------------------------------
+ * The IRQ exception handler
+ * ----------------------------------------------------------------------------
+ */
+func tftf_intr_handle
+ push {r0 - r3, lr}
+ bl tftf_irq_handler_dispatcher
+ pop {r0 - r3, lr}
+ eret
+endfunc tftf_intr_handle
diff --git a/tftf/framework/aarch64/arch.c b/tftf/framework/aarch64/arch.c
new file mode 100644
index 0000000..dfaa9d1
--- /dev/null
+++ b/tftf/framework/aarch64/arch.c
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+
+void tftf_arch_setup(void)
+{
+ /* Do not try to configure EL2 if TFTF is running at NS-EL1 */
+ if (IS_IN_EL2()) {
+ write_hcr_el2(HCR_TGE_BIT);
+ isb();
+ }
+}
diff --git a/tftf/framework/aarch64/asm_debug.S b/tftf/framework/aarch64/asm_debug.S
new file mode 100644
index 0000000..32c454f
--- /dev/null
+++ b/tftf/framework/aarch64/asm_debug.S
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+#if ENABLE_ASSERTIONS
+
+ .globl asm_assert
+
+/* Since the max supported decimal input number is 65535 */
+#define MAX_DEC_DIVISOR 10000
+/* The offset to add to get ascii for numerals '0 - 9' */
+#define ASCII_OFFSET_NUM 0x30
+
+.section .rodata.assert_str, "aS"
+assert_msg1:
+ .asciz "ASSERT: File "
+assert_msg2:
+ .asciz " Line "
+
+ /*
+ * This macro is intended to be used to print the
+ * line number in decimal. Used by asm_assert macro.
+ * The max number expected is 65536.
+ * In: x4 = the decimal to print.
+ * Clobber: x30, x0, x1, x2, x5, x6
+ */
+ .macro asm_print_line_dec
+ mov x6, #10 /* Divide by 10 after every loop iteration */
+ mov x5, #MAX_DEC_DIVISOR
+dec_print_loop:
+ udiv x0, x4, x5 /* Get the quotient */
+ msub x4, x0, x5, x4 /* Find the remainder */
+ add x0, x0, #ASCII_OFFSET_NUM /* Convert to ascii */
+ bl plat_crash_console_putc
+ udiv x5, x5, x6 /* Reduce divisor */
+ cbnz x5, dec_print_loop
+ .endm
+
+/* ---------------------------------------------------------------------------
+ * Assertion support in assembly.
+ * The below function helps to support assertions in assembly where we do not
+ * have a C runtime stack. Arguments to the function are :
+ * x0 - File name
+ * x1 - Line no
+ * Clobber list : x30, x0, x1, x2, x3, x4, x5, x6.
+ * ---------------------------------------------------------------------------
+ */
+func asm_assert
+ mov x5, x0
+ mov x6, x1
+ /* Ensure the console is initialized */
+ bl plat_crash_console_init
+ /* Check if the console is initialized */
+ cbz x0, _assert_loop
+ /* The console is initialized */
+ adr x4, assert_msg1
+ bl asm_print_str
+ mov x4, x5
+ bl asm_print_str
+ adr x4, assert_msg2
+ bl asm_print_str
+ /* Check if line number higher than max permitted */
+ tst x6, #~0xffff
+ b.ne _assert_loop
+ mov x4, x6
+ asm_print_line_dec
+ bl plat_crash_console_flush
+_assert_loop:
+ wfi
+ b _assert_loop
+endfunc asm_assert
+
+/*
+ * This function prints a string from address in x4.
+ * In: x4 = pointer to string.
+ * Clobber: x30, x0, x1, x2, x3
+ */
+func asm_print_str
+ mov x3, x30
+1:
+ ldrb w0, [x4], #0x1
+ cbz x0, 2f
+ bl plat_crash_console_putc
+ b 1b
+2:
+ ret x3
+endfunc asm_print_str
+
+/*
+ * This function prints a hexadecimal number in x4.
+ * In: x4 = the hexadecimal to print.
+ * Clobber: x30, x0 - x3, x5
+ */
+func asm_print_hex
+ mov x3, x30
+ mov x5, #64 /* No of bits to convert to ascii */
+1:
+ sub x5, x5, #4
+ lsrv x0, x4, x5
+ and x0, x0, #0xf
+ cmp x0, #0xA
+ b.lo 2f
+ /* Add by 0x27 in addition to ASCII_OFFSET_NUM
+ * to get ascii for characters 'a - f'.
+ */
+ add x0, x0, #0x27
+2:
+ add x0, x0, #ASCII_OFFSET_NUM
+ bl plat_crash_console_putc
+ cbnz x5, 1b
+ ret x3
+endfunc asm_print_hex
+
+#endif /* ENABLE_ASSERTIONS */
diff --git a/tftf/framework/aarch64/entrypoint.S b/tftf/framework/aarch64/entrypoint.S
new file mode 100644
index 0000000..dfedeae
--- /dev/null
+++ b/tftf/framework/aarch64/entrypoint.S
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <tftf.h>
+
+ .globl tftf_entrypoint
+ .globl tftf_hotplug_entry
+
+
+/* ----------------------------------------------------------------------------
+ * Cold boot entry point for the primary CPU.
+ * ----------------------------------------------------------------------------
+ */
+func tftf_entrypoint
+ /* --------------------------------------------------------------------
+ * Set the exception vectors
+ * --------------------------------------------------------------------
+ */
+ adr x0, tftf_vector
+ asm_write_vbar_el1_or_el2 x1
+
+ /* --------------------------------------------------------------------
+ * Enable the instruction cache, stack pointer and data access
+ * alignment checks
+ * --------------------------------------------------------------------
+ */
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ asm_read_sctlr_el1_or_el2
+ orr x0, x0, x1
+ asm_write_sctlr_el1_or_el2 x1
+ isb
+
+ /* --------------------------------------------------------------------
+ * This code is expected to be executed only by the primary CPU.
+ * Save the mpid for the first core that executes and if a secondary
+ * CPU has lost its way make it spin forever.
+ * --------------------------------------------------------------------
+ */
+ bl save_primary_mpid
+
+ /* --------------------------------------------------------------------
+ * Zero out NOBITS sections. There are 2 of them:
+ * - the .bss section;
+ * - the coherent memory section.
+ * --------------------------------------------------------------------
+ */
+ ldr x0, =__BSS_START__
+ ldr x1, =__BSS_SIZE__
+ bl zeromem16
+
+ ldr x0, =__COHERENT_RAM_START__
+ ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+ bl zeromem16
+
+ /* --------------------------------------------------------------------
+ * Give ourselves a small coherent stack to ease the pain of
+ * initializing the MMU
+ * --------------------------------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_set_coherent_stack
+
+ bl tftf_early_platform_setup
+ bl tftf_plat_arch_setup
+
+ /* --------------------------------------------------------------------
+ * Give ourselves a stack allocated in Normal -IS-WBWA memory
+ * --------------------------------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_set_stack
+
+ /* --------------------------------------------------------------------
+ * tftf_cold_boot_main() will perform the remaining architectural and
+ * platform setup, initialise the test framework's state, then run the
+ * tests.
+ * --------------------------------------------------------------------
+ */
+ b tftf_cold_boot_main
+
+dead:
+ b dead
+endfunc tftf_entrypoint
+
+/* ----------------------------------------------------------------------------
+ * Entry point for a CPU that has just been powered up.
+ * In : x0 - context_id
+ * ----------------------------------------------------------------------------
+ */
+func tftf_hotplug_entry
+
+ /* --------------------------------------------------------------------
+ * Preserve the context_id in a callee-saved register
+ * --------------------------------------------------------------------
+ */
+ mov x19, x0
+
+ /* --------------------------------------------------------------------
+ * Set the exception vectors
+ * --------------------------------------------------------------------
+ */
+ adr x0, tftf_vector
+ asm_write_vbar_el1_or_el2 x1
+
+ /* --------------------------------------------------------------------
+ * Enable the instruction cache, stack pointer and data access
+ * alignment checks
+ * --------------------------------------------------------------------
+ */
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ asm_read_sctlr_el1_or_el2
+ orr x0, x0, x1
+ asm_write_sctlr_el1_or_el2 x1
+ isb
+
+ /* --------------------------------------------------------------------
+ * Give ourselves a small coherent stack to ease the pain of
+ * initializing the MMU
+ * --------------------------------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_set_coherent_stack
+
+ /* --------------------------------------------------------------------
+ * Enable the MMU
+ * --------------------------------------------------------------------
+ */
+ bl tftf_plat_enable_mmu
+
+ /* --------------------------------------------------------------------
+ * Give ourselves a stack in normal memory.
+ * --------------------------------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_set_stack
+
+ /* --------------------------------------------------------------------
+ * Save the context_id for later retrieval by tests
+ * --------------------------------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ and x0, x0, #MPID_MASK
+ bl platform_get_core_pos
+
+ mov x1, x19
+
+ bl tftf_set_cpu_on_ctx_id
+
+ /* --------------------------------------------------------------------
+ * Jump to warm boot main function
+ * --------------------------------------------------------------------
+ */
+ b tftf_warm_boot_main
+endfunc tftf_hotplug_entry
+
+/* ----------------------------------------------------------------------------
+ * Saves the mpid of the primary core and if the primary core
+ * is already saved then it loops infinitely.
+ * ----------------------------------------------------------------------------
+ */
+func save_primary_mpid
+ adrp x1, tftf_primary_core
+ ldr w0, [x1, :lo12:tftf_primary_core]
+ mov w2, #INVALID_MPID
+ cmp w0, w2
+ b.ne panic
+ mov x2, #MPID_MASK
+ mrs x0, mpidr_el1
+ and x0, x0, x2
+ str w0, [x1, :lo12:tftf_primary_core]
+ ret
+panic:
+ /* Primary core MPID already saved */
+ b .
+ ret
+endfunc save_primary_mpid
diff --git a/tftf/framework/aarch64/exceptions.S b/tftf/framework/aarch64/exceptions.S
new file mode 100644
index 0000000..08bef46
--- /dev/null
+++ b/tftf/framework/aarch64/exceptions.S
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl tftf_vector
+
+vector_base tftf_vector
+ //-----------------------------------------------------
+ // Current EL with SP0 : 0x0 - 0x180
+ //-----------------------------------------------------
+vector_entry SynchronousExceptionSP0
+ b SynchronousExceptionSP0
+ check_vector_size SynchronousExceptionSP0
+
+vector_entry IrqSP0
+ b IrqSP0
+ check_vector_size IrqSP0
+
+vector_entry FiqSP0
+ b FiqSP0
+ check_vector_size FiqSP0
+
+vector_entry SErrorSP0
+ b SErrorSP0
+ check_vector_size SErrorSP0
+
+ //-----------------------------------------------------
+ // Current EL with SPx: 0x200 - 0x380
+ //-----------------------------------------------------
+vector_entry SynchronousExceptionSPx
+ b SynchronousExceptionSPx
+ check_vector_size SynchronousExceptionSPx
+
+vector_entry IrqSPx
+ /*
+ * TODO: Investigate whether the Trusted Firmware-A code for context
+ * save/restore could be reused
+ */
+ stp x29, x30, [sp, #-0x10]!
+ bl save_regs
+ bl tftf_irq_handler_dispatcher
+ bl restore_regs
+ ldp x29, x30, [sp], #0x10
+ eret
+ check_vector_size IrqSPx
+
+vector_entry FiqSPx
+ b FiqSPx
+ check_vector_size FiqSPx
+
+vector_entry SErrorSPx
+ b SErrorSPx
+ check_vector_size SErrorSPx
+
+ //-----------------------------------------------------
+ // Lower EL using AArch64 : 0x400 - 0x580
+ //-----------------------------------------------------
+vector_entry SynchronousExceptionA64
+ b SynchronousExceptionA64
+ check_vector_size SynchronousExceptionA64
+
+vector_entry IrqA64
+ b IrqA64
+ check_vector_size IrqA64
+
+vector_entry FiqA64
+ b FiqA64
+ check_vector_size FiqA64
+
+vector_entry SErrorA64
+ b SErrorA64
+ check_vector_size SErrorA64
+
+ //-----------------------------------------------------
+	// Lower EL using AArch32 : 0x600 - 0x780
+ //-----------------------------------------------------
+vector_entry SynchronousExceptionA32
+ b SynchronousExceptionA32
+ check_vector_size SynchronousExceptionA32
+
+vector_entry IrqA32
+ b IrqA32
+ check_vector_size IrqA32
+
+vector_entry FiqA32
+ b FiqA32
+ check_vector_size FiqA32
+
+vector_entry SErrorA32
+ b SErrorA32
+ check_vector_size SErrorA32
+
+
+// Note: Exceptions will always be from the same EL, so no need to save spsr
+func save_regs
+ sub sp, sp, #0x100
+ stp x0, x1, [sp, #0x0]
+ stp x2, x3, [sp, #0x10]
+ stp x4, x5, [sp, #0x20]
+ stp x6, x7, [sp, #0x30]
+ stp x8, x9, [sp, #0x40]
+ stp x10, x11, [sp, #0x50]
+ stp x12, x13, [sp, #0x60]
+ stp x14, x15, [sp, #0x70]
+ stp x16, x17, [sp, #0x80]
+ stp x18, x19, [sp, #0x90]
+ stp x20, x21, [sp, #0xa0]
+ stp x22, x23, [sp, #0xb0]
+ stp x24, x25, [sp, #0xc0]
+ stp x26, x27, [sp, #0xd0]
+ mrs x0, sp_el0
+ stp x28, x0, [sp, #0xe0]
+ str x0, [sp, #0xf0]
+ ret
+endfunc save_regs
+
+
+// Note: Exceptions will always be from the same EL, so no need to restore spsr
+func restore_regs
+ ldr x9, [sp, #0xf0]
+ ldp x28, x9, [sp, #0xe0]
+ msr sp_el0, x9
+ ldp x26, x27, [sp, #0xd0]
+ ldp x24, x25, [sp, #0xc0]
+ ldp x22, x23, [sp, #0xb0]
+ ldp x20, x21, [sp, #0xa0]
+ ldp x18, x19, [sp, #0x90]
+ ldp x16, x17, [sp, #0x80]
+ ldp x14, x15, [sp, #0x70]
+ ldp x12, x13, [sp, #0x60]
+ ldp x10, x11, [sp, #0x50]
+ ldp x8, x9, [sp, #0x40]
+ ldp x6, x7, [sp, #0x30]
+ ldp x4, x5, [sp, #0x20]
+ ldp x2, x3, [sp, #0x10]
+ ldp x0, x1, [sp, #0x0]
+ add sp, sp, #0x100
+ ret
+endfunc restore_regs
diff --git a/tftf/framework/debug.c b/tftf/framework/debug.c
new file mode 100644
index 0000000..4b4364d
--- /dev/null
+++ b/tftf/framework/debug.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <console.h>
+#include <debug.h>
+
+#if DEBUG
+void __attribute__((__noreturn__)) do_panic(const char *file, int line)
+{
+ printf("PANIC in file: %s line: %d\n", file, line);
+
+ console_flush();
+
+ while (1)
+ continue;
+}
+
+void __attribute__((__noreturn__)) do_bug_unreachable(const char *file, int line)
+{
+ mp_printf("BUG: Unreachable code!\n");
+ do_panic(file, line);
+}
+
+#else
+void __attribute__((__noreturn__)) do_panic(void)
+{
+ printf("PANIC\n");
+
+ console_flush();
+
+ while (1)
+ continue;
+}
+
+void __attribute__((__noreturn__)) do_bug_unreachable(void)
+{
+ mp_printf("BUG: Unreachable code!\n");
+ do_panic();
+}
+#endif
diff --git a/tftf/framework/framework.mk b/tftf/framework/framework.mk
new file mode 100644
index 0000000..f6e1d7b
--- /dev/null
+++ b/tftf/framework/framework.mk
@@ -0,0 +1,115 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+AUTOGEN_DIR := $(BUILD_PLAT)/autogen
+
+include lib/xlat_tables_v2/xlat_tables.mk
+include lib/compiler-rt/compiler-rt.mk
+
+TFTF_INCLUDES := \
+ -I${AUTOGEN_DIR} \
+ -Itftf/framework/include \
+ -Iinclude/common \
+ -Iinclude/common/${ARCH} \
+ -Iinclude/drivers \
+ -Iinclude/drivers/arm \
+ -Iinclude/drivers/io \
+ -Iinclude/lib \
+ -Iinclude/lib/${ARCH} \
+ -Iinclude/lib/extensions \
+ -Iinclude/lib/stdlib \
+ -Iinclude/lib/stdlib/sys \
+ -Iinclude/lib/utils \
+ -Iinclude/lib/xlat_tables \
+ -Iinclude/plat/common \
+ -Iinclude/runtime_services \
+ -Iinclude/runtime_services/secure_el0_payloads \
+ -Iinclude/runtime_services/secure_el1_payloads
+
+# Standard C library source files
+STD_LIB_SOURCES := lib/stdlib/abort.c \
+ lib/stdlib/assert.c \
+ lib/stdlib/mem.c \
+ lib/stdlib/printf.c \
+ lib/stdlib/putchar.c \
+ lib/stdlib/puts.c \
+ lib/stdlib/rand.c \
+ lib/stdlib/strchr.c \
+ lib/stdlib/strcmp.c \
+ lib/stdlib/strlen.c \
+ lib/stdlib/strncmp.c \
+ lib/stdlib/strncpy.c \
+ lib/stdlib/subr_prf.c
+
+FRAMEWORK_SOURCES := ${AUTOGEN_DIR}/tests_list.c
+
+FRAMEWORK_SOURCES += $(addprefix tftf/, \
+ framework/${ARCH}/arch.c \
+ framework/${ARCH}/asm_debug.S \
+ framework/${ARCH}/entrypoint.S \
+ framework/${ARCH}/exceptions.S \
+ framework/debug.c \
+ framework/main.c \
+ framework/nvm_results_helpers.c \
+ framework/report.c \
+ framework/timer/timer_framework.c \
+ tests/common/test_helpers.c \
+)
+
+FRAMEWORK_SOURCES += \
+ lib/${ARCH}/cache_helpers.S \
+ lib/${ARCH}/misc_helpers.S \
+ lib/delay/delay.c \
+ lib/events/events.c \
+ lib/extensions/amu/${ARCH}/amu.c \
+ lib/extensions/amu/${ARCH}/amu_helpers.S \
+ lib/irq/irq.c \
+ lib/locks/${ARCH}/spinlock.S \
+ lib/power_management/hotplug/hotplug.c \
+ lib/power_management/suspend/${ARCH}/asm_tftf_suspend.S \
+ lib/power_management/suspend/tftf_suspend.c \
+ lib/psci/psci.c \
+ lib/sdei/sdei.c \
+ lib/smc/${ARCH}/asm_smc.S \
+ lib/smc/${ARCH}/smc.c \
+ ${STD_LIB_SOURCES} \
+ lib/trusted_os/trusted_os.c \
+ lib/utils/mp_printf.c \
+ lib/utils/uuid.c \
+ ${XLAT_TABLES_LIB_SRCS} \
+ plat/common/${ARCH}/platform_helpers.S \
+ plat/common/${ARCH}/platform_mp_stack.S \
+ plat/common/plat_common.c \
+ plat/common/plat_state_id.c \
+ plat/common/plat_topology.c \
+ plat/common/tftf_nvm_accessors.c
+
+
+FRAMEWORK_SOURCES += ${COMPILER_RT_SRCS}
+
+TFTF_LINKERFILE := tftf/framework/tftf.ld.S
+
+
+TFTF_DEFINES :=
+
+TEST_REPORTS ?= uart:raw
+$(info Selected reports: $(TEST_REPORTS))
+ifneq (,$(findstring uart:raw,$(TEST_REPORTS)))
+ TFTF_DEFINES += -DTEST_REPORT_UART_RAW
+endif
+ifneq (,$(findstring uart:junit,$(TEST_REPORTS)))
+ TFTF_DEFINES += -DTEST_REPORT_UART_JUNIT
+endif
+ifneq (,$(findstring semihosting:raw,$(TEST_REPORTS)))
+ TFTF_DEFINES += -DTEST_REPORT_SEMIHOSTING_RAW
+endif
+ifneq (,$(findstring semihosting:junit,$(TEST_REPORTS)))
+ TFTF_DEFINES += -DTEST_REPORT_SEMIHOSTING_JUNIT
+endif
+
+# Enable dynamic translation tables
+PLAT_XLAT_TABLES_DYNAMIC := 1
+$(eval $(call add_define,TFTF_DEFINES,PLAT_XLAT_TABLES_DYNAMIC))
diff --git a/tftf/framework/include/nvm.h b/tftf/framework/include/nvm.h
new file mode 100644
index 0000000..3544c2a
--- /dev/null
+++ b/tftf/framework/include/nvm.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __NVM_H__
+#define __NVM_H__
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <tftf.h>
+#include "tests_list.h"
+
+#define TEST_BUFFER_SIZE 0x80
+
+typedef struct {
+ /*
+ * @brief Last executed TFTF build message which consists of date and
+ * time when TFTF is built.
+ *
+ * A mismatch with the build message of currently executing binary will
+ * determine whether TFTF data structures stored in NVM needs to be
+ * initialised or not.
+ */
+ char build_message[BUILD_MESSAGE_SIZE];
+
+ /*
+ * The following 2 fields track the progress in the test session. They
+ * indicate which test case we are dealing with and the progress of this
+ * test, i.e. whether it hasn't started yet, or it is being executed
+ * right now, ...
+ */
+ test_ref_t test_to_run;
+ test_progress_t test_progress;
+
+ /*
+ * @brief Scratch buffer for test internal use.
+ *
+ * A buffer that the test can use as a scratch area for whatever it is
+ * doing.
+ */
+ char testcase_buffer[TEST_BUFFER_SIZE];
+
+ /*
+ * @brief Results of tests.
+ *
+ * @note TESTCASE_RESULT_COUNT is defined in tests_list.h
+ * (auto-generated file).
+ */
+ TESTCASE_RESULT testcase_results[TESTCASE_RESULT_COUNT];
+
+ /*
+ * @brief Size of \a result_buffer.
+ */
+ unsigned result_buffer_size;
+
+ /*
+ * Buffer containing the output of all tests.
+ * Each test appends its output to the end of \a result_buffer.
+ * Tests which produce no output write nothing in \a result_buffer.
+ */
+ char *result_buffer;
+} tftf_state_t;
+
+/*
+ * Helper macros to access fields of \a tftf_state_t structure.
+ */
+#define TFTF_STATE_OFFSET(_field) offsetof(tftf_state_t, _field)
+
+/*
+ * Return 1 if we need to start a new test session;
+ * 0 if we need to resume an interrupted session.
+ */
+unsigned int new_test_session(void);
+
+/*
+ * @brief Initialize NVM if necessary.
+ *
+ * When TFTF is launched on the target, its data structures need
+ * to be initialised in NVM. However if some test resets the board
+ * (as part of its normal behaviour or because it crashed) then
+ * TFTF data structure must be left unchanged in order to resume
+ * the test session where it has been left.
+ *
+ * This function detects whether TFTF has just been launched and if so
+ * initialises its data structures. If TFTF has just reset then it does
+ * nothing.
+ *
+ * @return STATUS_SUCCESS on success, another status code on failure.
+ */
+STATUS tftf_init_nvm(void);
+
+/*
+ * @brief Clean NVM.
+ *
+ * Clean TFTF data structures in NVM.
+ * This function must be called when all tests have completed.
+ *
+ * @return STATUS_SUCCESS on success, another status code on failure.
+ */
+STATUS tftf_clean_nvm(void);
+
+/* Writes the buffer to the flash at offset with length equal to
+ * size
+ * Returns: STATUS_FAIL, STATUS_SUCCESS, STATUS_OUT_OF_RESOURCES
+ */
+STATUS tftf_nvm_write(unsigned long long offset, const void *buffer, size_t size);
+
+/* Reads the flash into buffer at offset with length equal to
+ * size
+ * Returns: STATUS_FAIL, STATUS_SUCCESS, STATUS_OUT_OF_RESOURCES
+ */
+STATUS tftf_nvm_read(unsigned long long offset, void *buffer, size_t size);
+#endif /*__ASSEMBLY__*/
+
+#endif
diff --git a/tftf/framework/include/tftf.h b/tftf/framework/include/tftf.h
new file mode 100644
index 0000000..8231e28
--- /dev/null
+++ b/tftf/framework/include/tftf.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TFTF_H__
+#define __TFTF_H__
+
+#ifndef __ASSEMBLY__
+#include <status.h>
+#include <stddef.h>
+#include <tftf_lib.h>
+
+#define TFTF_WELCOME_STR "Booting trusted firmware test framework"
+
+/* Maximum size of test output (in bytes) */
+#define TESTCASE_OUTPUT_MAX_SIZE 512
+
+/* Size of build message used to differentiate different TFTF binaries */
+#define BUILD_MESSAGE_SIZE 0x20
+
+extern const char build_message[];
+
+typedef test_result_t (*test_function_t)(void);
+
+typedef struct {
+ /* Test result (success, crashed, failed, ...). */
+ test_result_t result;
+ unsigned long long duration;
+ /*
+ * Offset of test output string from TEST_NVM_RESULT_BUFFER_OFFSET.
+ * Only relevant if test has an output, i.e. if \a output_size is not
+ * zero.
+ */
+ unsigned output_offset;
+ /* Size of test output string, excluding final \0. */
+ unsigned output_size;
+} TESTCASE_RESULT;
+
+typedef struct {
+ unsigned index;
+ const char *name;
+ const char *description;
+ test_function_t test;
+} test_case_t;
+
+typedef struct {
+ const char *name;
+ const char *description;
+ const test_case_t *testcases;
+} test_suite_t;
+
+/*
+ * Reference to a specific test.
+ */
+typedef struct {
+ unsigned int testsuite_idx;
+ unsigned int testcase_idx;
+} test_ref_t;
+
+/*
+ * The progress in the execution of a test.
+ * This is used to implement the following state machine.
+ *
+ * +-> TEST_READY (initial state of the test) <--------------+
+ * | | |
+ * | | Test framework prepares the test environment. |
+ * | | |
+ * | v |
+ * | TEST_IN_PROGRESS |
+ * | | |
+ * | | Hand over to the test function. |
+ * | | If the test wants to reboot the platform ---> TEST_REBOOTING |
+ * | | Test function returns into framework. | |
+ * | | | Reboot |
+ * | | | |
+ * | | +---------+
+ * | v
+ * | TEST_COMPLETE
+ * | |
+ * | | Do some framework management.
+ * | | Move to next test.
+ * +--------+
+ */
+typedef enum {
+ TEST_PROGRESS_MIN = 0,
+ TEST_READY = TEST_PROGRESS_MIN,
+ TEST_IN_PROGRESS,
+ TEST_COMPLETE,
+ TEST_REBOOTING,
+
+ TEST_PROGRESS_MAX,
+} test_progress_t;
+
+#define TEST_PROGRESS_IS_VALID(_progress) \
+ ((_progress >= TEST_PROGRESS_MIN) && (_progress < TEST_PROGRESS_MAX))
+
+/*
+ * The definition of this global variable is generated by the script
+ * 'tftf_generate_test_list' during the build process
+ */
+extern const test_suite_t testsuites[];
+
+extern TESTCASE_RESULT testcase_results[];
+
+/* Set/Get the test to run in NVM */
+STATUS tftf_set_test_to_run(const test_ref_t test_to_run);
+STATUS tftf_get_test_to_run(test_ref_t *test_to_run);
+/* Set/Get the progress of the current test in NVM */
+STATUS tftf_set_test_progress(test_progress_t test_progress);
+STATUS tftf_get_test_progress(test_progress_t *test_progress);
+
+/**
+** Save test result into NVM.
+*/
+STATUS tftf_testcase_set_result(const test_case_t *testcase,
+ test_result_t result,
+ unsigned long long duration);
+/**
+** Get a testcase result from NVM.
+**
+** @param[in] testcase The targeted testcase.
+** @param[out] result Testcase result. Only \a result.result and
+** \a result.duration are of interest for the caller and the 2 other fields
+** should be ignored (they correspond to a location in NVM).
+** @param[out] test_output Buffer to store the test output, if any.
+** \a test_output must be big enough to hold the whole test output.
+** Test output will be \a TESTCASE_OUTPUT_MAX_SIZE bytes maximum.
+*/
+STATUS tftf_testcase_get_result(const test_case_t *testcase, TESTCASE_RESULT *result, char *test_output);
+
+void tftf_report_generate(void);
+
+/*
+ * Exit the TFTF.
+ * This function can be used when a fatal error is encountered or as part of the
+ * normal termination process. It does the necessary cleanups then puts the
+ * core in a low-power state.
+ */
+void __dead2 tftf_exit(void);
+
+void tftf_arch_setup(void);
+
+/*
+ * This function detects the power state format used by PSCI which can
+ * be either extended or original format. For the Original format,
+ * the State-ID can either be NULL or can be using the recommended encoding.
+ * This function needs to be invoked once during cold boot prior to the
+ * invocation of any PSCI power state helper functions.
+ */
+void tftf_detect_psci_pstate_format(void);
+
+/*
+ * Run the next test on the calling CPU.
+ * Once the test is complete, if the calling CPU is the last one to exit the
+ * test then do the necessary bookkeeping, report the overall test result and
+ * move on to the next test. Otherwise, shut down the calling CPU.
+ *
+ * This function never returns.
+ */
+void __dead2 run_tests(void);
+
+/* Entry point for a CPU that has just been powered up */
+void tftf_hotplug_entry(void);
+
+#endif /*__ASSEMBLY__*/
+
+#endif
diff --git a/tftf/framework/main.c b/tftf/framework/main.c
new file mode 100644
index 0000000..3f94dc9
--- /dev/null
+++ b/tftf/framework/main.c
@@ -0,0 +1,599 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <irq.h>
+#include <mmio.h>
+#include <nvm.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <string.h>
+#include <sys/types.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+/* version information for TFTF */
+extern const char version_string[];
+
+unsigned int lead_cpu_mpid;
+
+/* Defined in hotplug.c */
+extern volatile test_function_t test_entrypoint[PLATFORM_CORE_COUNT];
+
+/* Per-CPU results for the current test */
+static test_result_t test_results[PLATFORM_CORE_COUNT];
+
+/* Context ID passed to tftf_psci_cpu_on() */
+static u_register_t cpu_on_ctx_id_arr[PLATFORM_CORE_COUNT];
+
+static unsigned int test_is_rebooting;
+
+static inline const test_suite_t *current_testsuite(void)
+{
+ test_ref_t test_to_run;
+ tftf_get_test_to_run(&test_to_run);
+ return &testsuites[test_to_run.testsuite_idx];
+}
+
+static inline const test_case_t *current_testcase(void)
+{
+ test_ref_t test_to_run;
+ tftf_get_test_to_run(&test_to_run);
+ return &testsuites[test_to_run.testsuite_idx].
+ testcases[test_to_run.testcase_idx];
+}
+
+/*
+ * Identify the next test in the tests list and update the NVM data to point to
+ * that test.
+ * If there are no more tests to execute, return NULL.
+ * Otherwise, return the test case.
+ */
+static const test_case_t *advance_to_next_test(void)
+{
+ test_ref_t test_to_run;
+ const test_case_t *testcase;
+ unsigned int testcase_idx;
+ unsigned int testsuite_idx;
+
+#if DEBUG
+ test_progress_t progress;
+ tftf_get_test_progress(&progress);
+ assert(progress == TEST_COMPLETE);
+#endif
+
+ tftf_get_test_to_run(&test_to_run);
+ testcase_idx = test_to_run.testcase_idx;
+ testsuite_idx = test_to_run.testsuite_idx;
+
+ /* Move to the next test case in the current test suite */
+ ++testcase_idx;
+ testcase = &testsuites[testsuite_idx].testcases[testcase_idx];
+
+ if (testcase->name == NULL) {
+ /*
+			 * There are no more test cases in the current test suite so move
+ * to the first test case of the next test suite.
+ */
+ const test_suite_t *testsuite;
+ testcase_idx = 0;
+ ++testsuite_idx;
+ testsuite = &testsuites[testsuite_idx];
+ testcase = &testsuite->testcases[0];
+
+ if (testsuite->name == NULL) {
+ /*
+			 * This was the last test suite so there are no more tests
+ * at all.
+ */
+ return NULL;
+ }
+ }
+
+ VERBOSE("Moving to test (%u,%u)\n", testsuite_idx, testcase_idx);
+ test_to_run.testsuite_idx = testsuite_idx;
+ test_to_run.testcase_idx = testcase_idx;
+ tftf_set_test_to_run(test_to_run);
+ tftf_set_test_progress(TEST_READY);
+
+ return testcase;
+}
+
+/*
+ * This function is executed only by the lead CPU.
+ * It prepares the environment for the next test to run.
+ */
+static void prepare_next_test(void)
+{
+ unsigned int mpid;
+ unsigned int core_pos;
+ unsigned int cpu_node;
+
+ /* This function should be called by the lead CPU only */
+ assert((read_mpidr_el1() & MPID_MASK) == lead_cpu_mpid);
+
+ /*
+ * Only the lead CPU should be powered on at this stage. All other CPUs
+ * should be powered off or powering off. If some CPUs are not powered
+ * off yet, wait for them to power off.
+ */
+ for_each_cpu(cpu_node) {
+ mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (mpid == lead_cpu_mpid)
+ assert(tftf_is_cpu_online(mpid));
+ else
+ while (tftf_psci_affinity_info(mpid, MPIDR_AFFLVL0)
+ == PSCI_STATE_ON)
+ ;
+ }
+
+ /* No CPU should have entered the test yet */
+ assert(tftf_get_ref_cnt() == 0);
+
+ /* Populate the test entrypoint for the lead CPU */
+ core_pos = platform_get_core_pos(lead_cpu_mpid);
+ test_entrypoint[core_pos] = (test_function_t) current_testcase()->test;
+
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i)
+ test_results[i] = TEST_RESULT_NA;
+
+ NOTICE("Starting unittest '%s - %s'\n",
+ current_testsuite()->name, current_testcase()->name);
+
+ /* Program the watchdog */
+ tftf_platform_watchdog_set();
+
+ /* TODO: Take a 1st timestamp to be able to measure test duration */
+
+ tftf_set_test_progress(TEST_IN_PROGRESS);
+}
+
+/*
+ * Go through individual CPUs' test results and determine the overall
+ * test result from that.
+ */
+static test_result_t get_overall_test_result(void)
+{
+ test_result_t result = TEST_RESULT_NA;
+ unsigned int cpu_mpid;
+ unsigned int cpu_node;
+ unsigned int core_pos;
+
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ core_pos = platform_get_core_pos(cpu_mpid);
+
+ switch (test_results[core_pos]) {
+ case TEST_RESULT_NA:
+ VERBOSE("CPU%u not involved in the test\n", core_pos);
+ /* Ignoring */
+ break;
+
+ case TEST_RESULT_SKIPPED:
+ /*
+ * If at least one CPU skipped the test, consider the
+ * whole test as skipped as well.
+ */
+ NOTICE("CPU%u skipped the test\n", core_pos);
+ return TEST_RESULT_SKIPPED;
+
+ case TEST_RESULT_SUCCESS:
+ result = TEST_RESULT_SUCCESS;
+ break;
+
+ case TEST_RESULT_FAIL:
+ ERROR("CPU%u failed the test\n", core_pos);
+ return TEST_RESULT_FAIL;
+
+ case TEST_RESULT_CRASHED:
+ /*
+ * Means the CPU never returned from the test whereas it
+ * was supposed to. Either there is a bug in the test's
+ * implementation or some sort of unexpected crash
+ * happened.
+ * If at least one CPU crashed, consider the whole test
+ * as crashed as well.
+ */
+ ERROR("CPU%u never returned from the test!\n", core_pos);
+ return TEST_RESULT_CRASHED;
+
+ default:
+ ERROR("Unknown test result value: %u\n",
+ test_results[core_pos]);
+ panic();
+ }
+ }
+
+ /*
+ * At least one CPU (i.e. the lead CPU) should have participated in the
+ * test.
+ */
+ assert(result != TEST_RESULT_NA);
+ return result;
+}
+
+/*
+ * This function is executed by the last CPU to exit the test only.
+ * It does the necessary bookkeeping and reports the overall test result.
+ * If it was the last test, it will also generate the final test report.
+ * Otherwise, it will reset the platform, provided that the platform
+ * supports reset from non-trusted world. This ensures that the next test
+ * runs in a clean environment.
+ *
+ * Return 1 if this was the last test, 0 otherwise.
+ */
+static unsigned int close_test(void)
+{
+ const test_case_t *next_test;
+
+#if DEBUG
+ /*
+	 * Check that the test didn't pretend to reset the platform, when in
+ * fact it returned into the framework.
+ *
+ * If that happens, the test implementation should be fixed.
+ * However, it is not a fatal error so just flag the problem in debug
+ * builds.
+ */
+ test_progress_t progress;
+ tftf_get_test_progress(&progress);
+ assert(progress != TEST_REBOOTING);
+#endif /* DEBUG */
+
+ tftf_set_test_progress(TEST_COMPLETE);
+ test_is_rebooting = 0;
+
+ /* TODO: Take a 2nd timestamp and compute test duration */
+
+ /* Reset watchdog */
+ tftf_platform_watchdog_reset();
+
+ /* Ensure no CPU is still executing the test */
+ assert(tftf_get_ref_cnt() == 0);
+
+ /* Save test result in NVM */
+ test_result_t overall_test_result = get_overall_test_result();
+ tftf_testcase_set_result(current_testcase(),
+ overall_test_result,
+ 0);
+
+ NOTICE("Unittest '%s - %s' complete. Result: %s\n",
+ current_testsuite()->name, current_testcase()->name,
+ test_result_to_string(overall_test_result));
+
+ /* The test is finished, let's move to the next one (if any) */
+ next_test = advance_to_next_test();
+
+ /* If this was the last test then report all results */
+ if (!next_test) {
+ tftf_report_generate();
+ tftf_clean_nvm();
+ return 1;
+ } else {
+#if (PLAT_SUPPORTS_NS_RESET && !NEW_TEST_SESSION && USE_NVM)
+ /*
+ * Reset the platform so that the next test runs in a clean
+ * environment.
+ */
+ INFO("Reset platform before executing next test:%p\n",
+ (void *) &(next_test->test));
+ tftf_plat_reset();
+ bug_unreachable();
+#endif
+ }
+
+ return 0;
+}
+
+/*
+ * Hand over to lead CPU, i.e.:
+ * 1) Power on lead CPU
+ * 2) Power down calling CPU
+ */
+static void __dead2 hand_over_to_lead_cpu(void)
+{
+ int ret;
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ VERBOSE("CPU%u: Hand over to lead CPU%u\n", core_pos,
+ platform_get_core_pos(lead_cpu_mpid));
+
+ /*
+ * Power on lead CPU.
+ * The entry point address passed as the 2nd argument of tftf_cpu_on()
+ * doesn't matter because it will be overwritten by prepare_next_test().
+ * Pass a NULL pointer to easily catch the problem in case something
+ * goes wrong.
+ */
+ ret = tftf_cpu_on(lead_cpu_mpid, 0, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU%u: Failed to power on lead CPU%u (%d)\n",
+ core_pos, platform_get_core_pos(lead_cpu_mpid), ret);
+ panic();
+ }
+
+ /* Wait for lead CPU to be actually powered on */
+ while (!tftf_is_cpu_online(lead_cpu_mpid))
+ ;
+
+ /*
+ * Lead CPU has successfully booted, let's now power down the calling
+ * core.
+ */
+ tftf_cpu_off();
+ panic();
+}
+
+void __dead2 run_tests(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ unsigned int test_session_finished;
+ unsigned int cpus_cnt;
+
+ while (1) {
+ if (mpid == lead_cpu_mpid && (tftf_get_ref_cnt() == 0))
+ prepare_next_test();
+
+ /*
+ * Increment the reference count to indicate that the CPU is
+ * participating in the test.
+ */
+ tftf_inc_ref_cnt();
+
+ /*
+ * Mark the CPU's test result as "crashed". This is meant to be
+ * overwritten by the actual test result when the CPU returns
+ * from the test function into the framework. In case the CPU
+ * crashes in the test (and thus, never returns from it), this
+ * variable will hold the right value.
+ */
+ test_results[core_pos] = TEST_RESULT_CRASHED;
+
+ /*
+ * Jump to the test entrypoint for this core.
+ * - For the lead CPU, it has been populated by
+ * prepare_next_test()
+ * - For other CPUs, it has been populated by tftf_cpu_on() or
+ * tftf_try_cpu_on()
+ */
+ while (test_entrypoint[core_pos] == 0)
+ ;
+
+ test_results[core_pos] = test_entrypoint[core_pos]();
+ test_entrypoint[core_pos] = 0;
+
+ /*
+ * Decrement the reference count to indicate that the CPU is not
+ * participating in the test any longer.
+ */
+ cpus_cnt = tftf_dec_ref_cnt();
+
+ /*
+ * Last CPU to exit the test gets to do the necessary
+ * bookkeeping and to report the overall test result.
+ * Other CPUs shut down.
+ */
+ if (cpus_cnt == 0) {
+ test_session_finished = close_test();
+ if (test_session_finished)
+ break;
+
+ if (mpid != lead_cpu_mpid) {
+ hand_over_to_lead_cpu();
+ bug_unreachable();
+ }
+ } else {
+ tftf_cpu_off();
+ panic();
+ }
+ }
+
+ tftf_exit();
+
+ /* Should never reach this point */
+ bug_unreachable();
+}
+
+u_register_t tftf_get_cpu_on_ctx_id(unsigned int core_pos)
+{
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ return cpu_on_ctx_id_arr[core_pos];
+}
+
+void tftf_set_cpu_on_ctx_id(unsigned int core_pos, u_register_t context_id)
+{
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ cpu_on_ctx_id_arr[core_pos] = context_id;
+}
+
+unsigned int tftf_is_rebooted(void)
+{
+ return test_is_rebooting;
+}
+
+/*
+ * Return 0 if the test session can be resumed
+ * -1 otherwise.
+ */
+static int resume_test_session(void)
+{
+ test_ref_t test_to_run;
+ test_progress_t test_progress;
+ const test_case_t *next_test;
+
+ /* Get back on our feet. Where did we stop? */
+ tftf_get_test_to_run(&test_to_run);
+ tftf_get_test_progress(&test_progress);
+ assert(TEST_PROGRESS_IS_VALID(test_progress));
+
+ switch (test_progress) {
+ case TEST_READY:
+ /*
+ * The TFTF has reset in the framework code, before the test
+ * actually started.
+ * Nothing to update, just start the test from scratch.
+ */
+ break;
+
+ case TEST_IN_PROGRESS:
+ /*
+ * The test crashed, i.e. it couldn't complete.
+ * Update the test result in NVM then move to the next test.
+ */
+ INFO("Test has crashed, moving to the next one\n");
+ tftf_testcase_set_result(current_testcase(),
+ TEST_RESULT_CRASHED,
+ 0);
+ next_test = advance_to_next_test();
+ if (!next_test) {
+ INFO("No more tests\n");
+ return -1;
+ }
+ break;
+
+ case TEST_COMPLETE:
+ /*
+ * The TFTF has reset in the framework code, after the test had
+ * completed but before we finished the framework maintenance
+ * required to move to the next test.
+ *
+ * In this case, we don't know the exact state of the data:
+ * maybe we had the time to update the test result,
+ * maybe we had the time to move to the next test.
+ * We can't be sure so let's stay on the safe side and just
+ * restart the test session from the beginning...
+ */
+ NOTICE("The test framework has been interrupted in the middle "
+ "of critical maintenance operations.\n");
+ NOTICE("Can't recover execution.\n");
+ return -1;
+
+ case TEST_REBOOTING:
+ /*
+ * Nothing to update about the test session, as we want to
+ * re-enter the same test. Just remember that the test is
+ * rebooting in case it queries this information.
+ */
+ test_is_rebooting = 1;
+ break;
+
+ default:
+ bug_unreachable();
+ }
+
+ return 0;
+}
+
+/*
+ * C entry point in the TFTF.
+ * This function is executed by the primary CPU only.
+ */
+void __dead2 tftf_cold_boot_main(void)
+{
+ STATUS status;
+ int rc;
+
+ NOTICE("%s\n", TFTF_WELCOME_STR);
+ NOTICE("%s\n", build_message);
+ NOTICE("%s\n\n", version_string);
+
+#ifndef AARCH32
+ NOTICE("Running at NS-EL%u\n", IS_IN_EL(1) ? 1 : 2);
+#else
+ NOTICE("Running in AArch32 HYP mode\n");
+#endif
+
+ tftf_arch_setup();
+ tftf_platform_setup();
+ tftf_init_topology();
+
+ tftf_irq_setup();
+
+ rc = tftf_initialise_timer();
+ if (rc != 0) {
+ ERROR("Failed to initialize the timer subsystem (%d).\n", rc);
+ tftf_exit();
+ }
+
+ /* Enable the SGI used by the timer management framework */
+ tftf_irq_enable(IRQ_WAKE_SGI, GIC_HIGHEST_NS_PRIORITY);
+ enable_irq();
+
+ if (new_test_session()) {
+ NOTICE("Starting a new test session\n");
+ status = tftf_init_nvm();
+ if (status != STATUS_SUCCESS) {
+ /*
+ * TFTF will have an undetermined behavior if its data
+ * structures have not been initialised. There's no
+ * point in continuing execution.
+ */
+ ERROR("FATAL: Failed to initialise internal data structures in NVM.\n");
+ tftf_clean_nvm();
+ tftf_exit();
+ }
+ } else {
+ NOTICE("Resuming interrupted test session\n");
+ rc = resume_test_session();
+ if (rc < 0) {
+ tftf_report_generate();
+ tftf_clean_nvm();
+ tftf_exit();
+ }
+ }
+
+ /* Initialise the CPUs status map */
+ tftf_init_cpus_status_map();
+
+ /*
+ * Detect power state format and get power state information for
+ * a platform.
+ */
+ tftf_init_pstate_framework();
+
+ /* The lead CPU is always the primary core. */
+ lead_cpu_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /*
+ * Hand over to lead CPU if required.
+ * If the primary CPU is not the lead CPU for the first test then:
+ * 1) Power on the lead CPU
+ * 2) Power down the primary CPU
+ */
+ if ((read_mpidr_el1() & MPID_MASK) != lead_cpu_mpid) {
+ hand_over_to_lead_cpu();
+ bug_unreachable();
+ }
+
+ /* Enter the test session */
+ run_tests();
+
+ /* Should never reach this point */
+ bug_unreachable();
+}
+
+void __dead2 tftf_exit(void)
+{
+ NOTICE("Exiting tests.\n");
+
+ /* Let the platform code clean up if required */
+ tftf_platform_end();
+
+ while (1)
+ wfi();
+}
diff --git a/tftf/framework/nvm_results_helpers.c b/tftf/framework/nvm_results_helpers.c
new file mode 100644
index 0000000..34ef19f
--- /dev/null
+++ b/tftf/framework/nvm_results_helpers.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <nvm.h>
+#include <platform.h>
+#include <spinlock.h>
+#include <stdarg.h>
+#include <string.h>
+
+/*
+ * Temporary buffer to store 1 test output.
+ * This will eventually be saved into NVM at the end of the execution
+ * of this test.
+ */
+static char testcase_output[TESTCASE_OUTPUT_MAX_SIZE];
+/*
+ * A test output can be written in several pieces by calling
+ * tftf_testcase_printf() multiple times. testcase_output_idx keeps the position
+ * of the last character written in testcase_output buffer and allows to easily
+ * append a new string at next call to tftf_testcase_printf().
+ */
+static unsigned int testcase_output_idx;
+
+/* Lock to avoid concurrent accesses to the testcase output buffer */
+static spinlock_t testcase_output_lock;
+
+static tftf_state_t tftf_init_state = {
+ .build_message = "",
+ .test_to_run = {
+ .testsuite_idx = 0,
+ .testcase_idx = 0,
+ },
+ .test_progress = TEST_READY,
+ .testcase_buffer = { 0 },
+ .testcase_results = {
+ {
+ .result = TEST_RESULT_NA,
+ .duration = 0,
+ .output_offset = 0,
+ .output_size = 0,
+ }
+ },
+ .result_buffer_size = 0,
+ .result_buffer = NULL,
+};
+
+unsigned int new_test_session(void)
+{
+/* NEW_TEST_SESSION == 1 => we always want to start a new session */
+#if NEW_TEST_SESSION
+ INFO("Always starting a new test session (NEW_TEST_SESSION == 1)\n");
+ return 1;
+#endif
+ char saved_build_msg[BUILD_MESSAGE_SIZE];
+
+ /*
+ * Check the validity of the build message stored in NVM.
+ * It is invalid when it doesn't match with the TFTF binary currently
+ * executing.
+ */
+ tftf_nvm_read(TFTF_STATE_OFFSET(build_message), saved_build_msg,
+ BUILD_MESSAGE_SIZE);
+ return !!strncmp(build_message, saved_build_msg, BUILD_MESSAGE_SIZE);
+}
+
+STATUS tftf_init_nvm(void)
+{
+ INFO("Initialising NVM\n");
+
+ /* Copy the build message to identify the TFTF */
+ strncpy(tftf_init_state.build_message, build_message, BUILD_MESSAGE_SIZE);
+ return tftf_nvm_write(0, &tftf_init_state, sizeof(tftf_init_state));
+}
+
+STATUS tftf_clean_nvm(void)
+{
+ unsigned char corrupt_build_message = '\0';
+
+ /*
+ * This will cause TFTF to re-initialise its data structures next time
+ * it runs.
+ */
+ return tftf_nvm_write(TFTF_STATE_OFFSET(build_message),
+ &corrupt_build_message,
+ sizeof(corrupt_build_message));
+}
+
+STATUS tftf_set_test_to_run(const test_ref_t test_to_run)
+{
+ return tftf_nvm_write(TFTF_STATE_OFFSET(test_to_run), &test_to_run,
+ sizeof(test_to_run));
+}
+
+STATUS tftf_get_test_to_run(test_ref_t *test_to_run)
+{
+ assert(test_to_run != NULL);
+ return tftf_nvm_read(TFTF_STATE_OFFSET(test_to_run), test_to_run,
+ sizeof(*test_to_run));
+}
+
+STATUS tftf_set_test_progress(test_progress_t test_progress)
+{
+ return tftf_nvm_write(TFTF_STATE_OFFSET(test_progress), &test_progress,
+ sizeof(test_progress));
+}
+
+STATUS tftf_get_test_progress(test_progress_t *test_progress)
+{
+ assert(test_progress != NULL);
+ return tftf_nvm_read(TFTF_STATE_OFFSET(test_progress), test_progress,
+ sizeof(*test_progress));
+}
+
+STATUS tftf_testcase_set_result(const test_case_t *testcase,
+ test_result_t result,
+ unsigned long long duration)
+{
+ STATUS status;
+ unsigned result_buffer_size = 0;
+ TESTCASE_RESULT test_result;
+
+ assert(testcase != NULL);
+
+ /* Initialize Test case result */
+ test_result.result = result;
+ test_result.duration = duration;
+ test_result.output_offset = 0;
+ test_result.output_size = strlen(testcase_output);
+
+ /* Does the test have an output? */
+ if (test_result.output_size != 0) {
+ /* Get the size of the buffer containing all tests outputs */
+ status = tftf_nvm_read(TFTF_STATE_OFFSET(result_buffer_size),
+ &result_buffer_size, sizeof(unsigned));
+ if (status != STATUS_SUCCESS)
+ goto reset_test_output;
+
+ /*
+ * Write the output buffer at the end of the string buffer in
+ * NVM
+ */
+ test_result.output_offset = result_buffer_size;
+ status = tftf_nvm_write(
+ TFTF_STATE_OFFSET(result_buffer) + result_buffer_size,
+ testcase_output, test_result.output_size + 1);
+ if (status != STATUS_SUCCESS)
+ goto reset_test_output;
+
+ /* And update the buffer size into NVM */
+ result_buffer_size += test_result.output_size + 1;
+ status = tftf_nvm_write(TFTF_STATE_OFFSET(result_buffer_size),
+ &result_buffer_size, sizeof(unsigned));
+ if (status != STATUS_SUCCESS)
+ goto reset_test_output;
+ }
+
+ /* Write the test result into NVM */
+ status = tftf_nvm_write(TFTF_STATE_OFFSET(testcase_results) +
+ (testcase->index * sizeof(TESTCASE_RESULT)),
+ &test_result, sizeof(TESTCASE_RESULT));
+
+reset_test_output:
+ /* Reset test output buffer for the next test */
+ testcase_output_idx = 0;
+ testcase_output[0] = 0;
+
+ return status;
+}
+
+STATUS tftf_testcase_get_result(const test_case_t *testcase,
+ TESTCASE_RESULT *result,
+ char *test_output)
+{
+ STATUS status;
+ unsigned output_size;
+
+ assert(testcase != NULL);
+ assert(result != NULL);
+ assert(test_output != NULL);
+
+ status = tftf_nvm_read(TFTF_STATE_OFFSET(testcase_results)
+ + (testcase->index * sizeof(TESTCASE_RESULT)),
+ result, sizeof(TESTCASE_RESULT));
+ if (status != STATUS_SUCCESS) {
+ return status;
+ }
+
+ output_size = result->output_size;
+
+ if (output_size != 0) {
+ status = tftf_nvm_read(TFTF_STATE_OFFSET(result_buffer)
+ + result->output_offset,
+ test_output, output_size);
+ if (status != STATUS_SUCCESS)
+ return status;
+ }
+
+ test_output[output_size] = 0;
+
+ return STATUS_SUCCESS;
+}
+
+int tftf_testcase_printf(const char *format, ...)
+{
+ va_list ap;
+ int available;
+ int written = -1;
+
+ spin_lock(&testcase_output_lock);
+
+ assert(sizeof(testcase_output) >= testcase_output_idx);
+ available = sizeof(testcase_output) - testcase_output_idx;
+ if (available == 0) {
+ ERROR("%s: Output buffer is full ; the string won't be printed.\n",
+ __func__);
+ ERROR("%s: Consider increasing TESTCASE_OUTPUT_MAX_SIZE value.\n",
+ __func__);
+ goto release_lock;
+ }
+
+ va_start(ap, format);
+ written = vsnprintf(&testcase_output[testcase_output_idx], available,
+ format, ap);
+ va_end(ap);
+
+ if (written < 0) {
+ ERROR("%s: Output error (%d)", __func__, written);
+ goto release_lock;
+ }
+ /*
+ * If vsnprintf() truncated the string due to the size limit passed as
+ * an argument then its return value is the number of characters (not
+ * including the trailing '\0') which would have been written to the
+ * final string if enough space had been available. Thus, a return value
+ * of size or more means that the output was truncated.
+ *
+ * Adjust the value of 'written' to reflect what has been actually
+ * written.
+ */
+ if (written >= available) {
+ ERROR("%s: String has been truncated (%u/%u bytes written).\n",
+ __func__, available - 1, written);
+ ERROR("%s: Consider increasing TESTCASE_OUTPUT_MAX_SIZE value.\n",
+ __func__);
+ written = available - 1;
+ }
+
+ /*
+ * Update testcase_output_idx to point to the '\0' of the buffer.
+ * The next call of tftf_testcase_printf() will overwrite '\0' to
+ * append its new string to the buffer.
+ */
+ testcase_output_idx += written;
+
+release_lock:
+ spin_unlock(&testcase_output_lock);
+ return written;
+}
+
+void tftf_notify_reboot(void)
+{
+#if DEBUG
+ /* This function must be called by tests, not by the framework */
+ test_progress_t test_progress;
+ tftf_get_test_progress(&test_progress);
+ assert(test_progress == TEST_IN_PROGRESS);
+#endif /* DEBUG */
+
+ VERBOSE("Test intends to reset\n");
+ tftf_set_test_progress(TEST_REBOOTING);
+}
diff --git a/tftf/framework/report.c b/tftf/framework/report.c
new file mode 100644
index 0000000..4b0a857
--- /dev/null
+++ b/tftf/framework/report.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform_def.h> /* For TESTCASE_OUTPUT_MAX_SIZE */
+#include <semihosting.h>
+#include <stdio.h>
+#include <string.h>
+#include <tftf.h>
+
+struct tftf_report_ops {
+ long (*open)(const char *fname);
+ void (*write)(long handle, const char *str);
+ void (*close)(long handle);
+};
+
+#define TEST_REPORT_JUNIT_FILENAME "tftf_report_junit.xml"
+#define TEST_REPORT_RAW_FILENAME "tftf_report_raw.txt"
+
+#if defined(TEST_REPORT_UART_RAW) || defined(TEST_REPORT_UART_JUNIT)
+static long tftf_report_uart_open(const char *fname)
+{
+ printf("********** %s **********\n", fname);
+ return 0;
+}
+
+static void tftf_report_uart_write(long handle, const char *str)
+{
+ (void)handle;
+ assert(str);
+ /* Not using printf to avoid doing two copies. */
+ while (*str) {
+ putchar(*str++);
+ }
+}
+
+static void tftf_report_uart_close(long handle)
+{
+ (void)handle;
+ printf("************************\n");
+}
+
+const struct tftf_report_ops tftf_report_uart_ops = {
+ .open = tftf_report_uart_open,
+ .write = tftf_report_uart_write,
+ .close = tftf_report_uart_close,
+};
+#endif /* defined(TEST_REPORT_UART_RAW) || defined(TEST_REPORT_UART_JUNIT) */
+
+#if defined(TEST_REPORT_UART_RAW) || defined(TEST_REPORT_SEMIHOSTING_RAW)
+static unsigned int total_tests;
+static unsigned int tests_stats[TEST_RESULT_MAX];
+
+static void tftf_update_tests_statistics(test_result_t result)
+{
+ assert(TEST_RESULT_IS_VALID(result));
+ total_tests++;
+ tests_stats[result]++;
+}
+
+static const char *test_result_strings[TEST_RESULT_MAX] = {
+ "Skipped", "Passed", "Failed", "Crashed",
+};
+
+const char *test_result_to_string(test_result_t result)
+{
+ assert(TEST_RESULT_IS_VALID(result));
+ return test_result_strings[result];
+}
+
+static void tftf_report_generate_raw(const struct tftf_report_ops *rops,
+ const char *fname)
+{
+#define WRITE(str) rops->write(file_handle, str)
+#define BUFFER_SIZE 200
+ unsigned i, j;
+ long file_handle;
+ char buffer[BUFFER_SIZE];
+ const test_case_t *testcases;
+ TESTCASE_RESULT testcase_result;
+ char test_output[TESTCASE_OUTPUT_MAX_SIZE];
+ STATUS status;
+
+ file_handle = rops->open(fname);
+ if (file_handle == -1)
+ return;
+
+ /* Extract the result of all the testcases */
+ WRITE("========== TEST REPORT ==========\n");
+ for (i = 0; testsuites[i].name != NULL; i++) {
+ snprintf(buffer, BUFFER_SIZE, "# Test suite '%s':\n", testsuites[i].name);
+ WRITE(buffer);
+ testcases = testsuites[i].testcases;
+
+ for (j = 0; testcases[j].name != NULL; j++) {
+ status = tftf_testcase_get_result(&testcases[j], &testcase_result, test_output);
+ if (status != STATUS_SUCCESS) {
+ WRITE("Failed to get test result.\n");
+ continue;
+ }
+
+ tftf_update_tests_statistics(testcase_result.result);
+ /* TODO: print test duration */
+ snprintf(buffer, BUFFER_SIZE, "\t - %s: %s\n", testcases[j].name,
+ test_result_to_string(testcase_result.result));
+ WRITE(buffer);
+
+ if (strlen(test_output) != 0) {
+ WRITE("--- output ---\n");
+ snprintf(buffer, BUFFER_SIZE, "%s", test_output);
+ WRITE(buffer);
+ WRITE("--------------\n");
+ }
+ }
+ }
+ WRITE("=================================\n");
+
+ for (i = TEST_RESULT_MIN; i < TEST_RESULT_MAX; i++) {
+ snprintf(buffer, BUFFER_SIZE, "Tests %-8s: %d\n",
+ test_result_to_string(i), tests_stats[i]);
+ WRITE(buffer);
+ }
+ snprintf(buffer, BUFFER_SIZE, "%-14s: %d\n", "Total tests", total_tests);
+ WRITE(buffer);
+ WRITE("=================================\n");
+
+ rops->close(file_handle);
+#undef BUFFER_SIZE
+#undef WRITE
+}
+#endif /* defined(TEST_REPORT_UART_RAW) || defined(TEST_REPORT_SEMIHOSTING_RAW) */
+
+#if defined(TEST_REPORT_SEMIHOSTING_RAW) || defined(TEST_REPORT_SEMIHOSTING_JUNIT)
+static long tftf_report_semihosting_open(const char *fname)
+{
+ /* Create the report on the semihosting */
+ long handle = semihosting_file_open(fname, FOPEN_MODE_WPLUS);
+ if (handle == -1) {
+ ERROR("Failed to create test report file \"%s\" on semihosting"
+ " [status = %ld].\n", fname, handle);
+ }
+ NOTICE("Opened file \"%s\" on semihosting with handle %ld.\n", fname, handle);
+ return handle;
+}
+
+static void tftf_report_semihosting_write(long handle, const char *str)
+{
+ size_t length = strlen(str);
+ semihosting_file_write(handle, &length, (const uintptr_t) str);
+}
+
+static void tftf_report_semihosting_close(long handle)
+{
+ semihosting_file_close(handle);
+ NOTICE("Closing file with handle %ld on semihosting.\n", handle);
+}
+
+const struct tftf_report_ops tftf_report_semihosting_ops = {
+ .open = tftf_report_semihosting_open,
+ .write = tftf_report_semihosting_write,
+ .close = tftf_report_semihosting_close,
+};
+#endif /* defined(TEST_REPORT_SEMIHOSTING_RAW) || defined(TEST_REPORT_SEMIHOSTING_JUNIT) */
+
+
+#if defined(TEST_REPORT_UART_JUNIT) || defined(TEST_REPORT_SEMIHOSTING_JUNIT)
+static void tftf_report_generate_junit(const struct tftf_report_ops *rops,
+ const char *fname)
+{
+#define WRITE(str) rops->write(file_handle, str)
+#define BUFFER_SIZE 200
+
+ long file_handle;
+ unsigned i, j;
+ const test_case_t *testcases;
+ TESTCASE_RESULT result;
+ char buffer[BUFFER_SIZE];
+ char test_output[TESTCASE_OUTPUT_MAX_SIZE];
+
+ file_handle = rops->open(fname);
+
+ if (file_handle == -1) {
+ return;
+ }
+ WRITE("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
+ WRITE("<testsuites>\n");
+
+ /* Extract the result of all the testcases */
+ for (i = 0; testsuites[i].name != NULL; i++) {
+ snprintf(buffer, BUFFER_SIZE, "<testsuite name=\"%s\">\n",
+ testsuites[i].name);
+ WRITE(buffer);
+ testcases = testsuites[i].testcases;
+ for (j = 0; testcases[j].name != NULL; j++) {
+ tftf_testcase_get_result(&testcases[j], &result, test_output);
+
+ snprintf(buffer, BUFFER_SIZE, " <testcase name=\"%s\" time=\"%llu\"",
+ testcases[j].name, result.duration);
+ WRITE(buffer);
+ if (result.result == TEST_RESULT_SUCCESS) {
+ WRITE("/>\n");
+ } else {
+ WRITE(">\n");
+ if (result.result == TEST_RESULT_SKIPPED) {
+ WRITE(" <skipped/>\n");
+ } else {
+ WRITE(" <error type=\"failed\">\n");
+ WRITE(test_output);
+ WRITE(" </error>\n");
+ }
+ WRITE(" </testcase>\n");
+ }
+ }
+ WRITE("</testsuite>\n");
+ }
+
+ WRITE("</testsuites>\n");
+ rops->close(file_handle);
+#undef BUFFER_SIZE
+#undef WRITE
+}
+#endif /* defined(TEST_REPORT_UART_JUNIT) || defined(TEST_REPORT_SEMIHOSTING_JUNIT) */
+
+void tftf_report_generate(void)
+{
+ int nb_reports = 0;
+#ifdef TEST_REPORT_UART_RAW
+ tftf_report_generate_raw(&tftf_report_uart_ops, "raw");
+ nb_reports++;
+#endif
+#ifdef TEST_REPORT_UART_JUNIT
+ tftf_report_generate_junit(&tftf_report_uart_ops, "junit");
+ nb_reports++;
+#endif
+#ifdef TEST_REPORT_SEMIHOSTING_RAW
+ tftf_report_generate_raw(&tftf_report_semihosting_ops,
+ TEST_REPORT_RAW_FILENAME);
+ nb_reports++;
+#endif
+#ifdef TEST_REPORT_SEMIHOSTING_JUNIT
+ tftf_report_generate_junit(&tftf_report_semihosting_ops,
+ TEST_REPORT_JUNIT_FILENAME);
+ nb_reports++;
+#endif
+ assert(nb_reports > 0);
+}
diff --git a/tftf/framework/tftf.ld.S b/tftf/framework/tftf.ld.S
new file mode 100644
index 0000000..9432a74
--- /dev/null
+++ b/tftf/framework/tftf.ld.S
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(tftf_entrypoint)
+
+MEMORY {
+ RAM (rwx): ORIGIN = DRAM_BASE, LENGTH = DRAM_SIZE
+}
+
+
+SECTIONS
+{
+ . = TFTF_BASE;
+ __TFTF_BASE__ = .;
+
+ /*
+ * Code, read-only data and exception vectors. The entry point object
+ * is placed first so that tftf_entrypoint sits at TFTF_BASE.
+ */
+ ro . : {
+ __RO_START__ = .;
+ *entrypoint.o(.text*)
+ *(.text*)
+ *(.rodata*)
+ *(.vectors)
+ __RO_END_UNALIGNED__ = .;
+ /*
+ * Memory page(s) mapped to this section will be marked as
+ * read-only, executable. No RW data from the next section must
+ * creep in. Ensure the rest of the current memory page is unused.
+ */
+ . = NEXT(PAGE_SIZE);
+ __RO_END__ = .;
+ } >RAM
+
+ /* Initialised read-write data. */
+ .data : {
+ __DATA_START__ = .;
+ *(.data*)
+ __DATA_END__ = .;
+ } >RAM
+
+ /* Per-CPU stacks; NOLOAD as they need no initial contents. */
+ stacks (NOLOAD) : {
+ __STACKS_START__ = .;
+ *(tftf_normal_stacks)
+ __STACKS_END__ = .;
+ } >RAM
+
+ /*
+ * The .bss section gets initialised to 0 at runtime.
+ * Its base address must be 16-byte aligned.
+ */
+ .bss : ALIGN(16) {
+ __BSS_START__ = .;
+ *(SORT_BY_ALIGNMENT(.bss*))
+ *(COMMON)
+ __BSS_END__ = .;
+ } >RAM
+
+ /*
+ * The xlat_table section is for full, aligned page tables (4K).
+ * Removing them from .bss avoids forcing 4K alignment on
+ * the .bss section and eliminates the unnecessary zero init
+ */
+ xlat_table (NOLOAD) : {
+ *(xlat_table)
+ } >RAM
+
+ /*
+ * The base address of the coherent memory section must be page-aligned (4K)
+ * to guarantee that the coherent data are stored on their own pages and
+ * are not mixed with normal data. This is required to set up the correct
+ * memory attributes for the coherent data page tables.
+ */
+ coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+ __COHERENT_RAM_START__ = .;
+ *(tftf_coherent_stacks)
+ *(tftf_coherent_mem)
+ __COHERENT_RAM_END_UNALIGNED__ = .;
+ /*
+ * Memory page(s) mapped to this section will be marked
+ * as device memory. No other unexpected data must creep in.
+ * Ensure the rest of the current memory page is unused.
+ */
+ . = NEXT(PAGE_SIZE);
+ __COHERENT_RAM_END__ = .;
+ } >RAM
+
+ __COHERENT_RAM_UNALIGNED_SIZE__ =
+ __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+
+
+ __TFTF_END__ = .;
+
+ __BSS_SIZE__ = SIZEOF(.bss);
+
+}
diff --git a/tftf/framework/timer/timer_framework.c b/tftf/framework/timer/timer_framework.c
new file mode 100644
index 0000000..e5e9a0f
--- /dev/null
+++ b/tftf/framework/timer/timer_framework.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <errno.h>
+#include <irq.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <sgi.h>
+#include <spinlock.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <tftf.h>
+#include <timer.h>
+
+
+/* Helper macros */
+#define TIMER_STEP_VALUE (plat_timer_info->timer_step_value)
+#define TIMER_IRQ (plat_timer_info->timer_irq)
+#define PROGRAM_TIMER(a) plat_timer_info->program(a)
+#define INVALID_CORE UINT32_MAX
+#define INVALID_TIME UINT64_MAX
+#define MAX_TIME_OUT_MS 10000
+
+/*
+ * Pointer containing available timer information for the platform.
+ */
+static const plat_timer_t *plat_timer_info;
+/*
+ * Interrupt requested time by cores in terms of absolute time.
+ */
+static volatile unsigned long long interrupt_req_time[PLATFORM_CORE_COUNT];
+/*
+ * Contains the target core number of the timer interrupt.
+ */
+static unsigned int current_prog_core = INVALID_CORE;
+/*
+ * Lock to get a consistent view for programming the timer
+ */
+static spinlock_t timer_lock;
+/*
+ * Number of system ticks per millisec
+ */
+static unsigned int systicks_per_ms;
+
+/*
+ * Stores per CPU timer handler invoked on expiration of the requested timeout.
+ */
+static irq_handler_t timer_handler[PLATFORM_CORE_COUNT];
+
+/* Helper: read the system counter and convert ticks to milliseconds. */
+static inline unsigned long long get_current_time_ms(void)
+{
+        unsigned long long ticks;
+
+        /* Only valid once tftf_initialise_timer() has cached the frequency */
+        assert(systicks_per_ms);
+
+        ticks = mmio_read_64(SYS_CNT_BASE1 + CNTPCT_LO);
+        return ticks / systicks_per_ms;
+}
+
+/*
+ * Helper: absolute expiry time (ms) of the request currently programmed
+ * in the hardware timer, or 0 if no request is programmed.
+ */
+static inline unsigned long long get_current_prog_time(void)
+{
+        if (current_prog_core == INVALID_CORE)
+                return 0;
+
+        return interrupt_req_time[current_prog_core];
+}
+
+/*
+ * Initialise the timer framework.
+ *
+ * Fetches the platform timer driver, resets all per-core request slots,
+ * registers the framework's dispatch handler for the timer interrupt,
+ * enables that interrupt at the GIC with the highest non-secure
+ * priority, and caches the number of system counter ticks per
+ * millisecond.
+ *
+ * Returns 0 on success, or the platform error code if no timer driver
+ * is available.
+ */
+int tftf_initialise_timer(void)
+{
+ int rc;
+ unsigned int i;
+
+ /*
+ * Get platform specific timer information
+ */
+ rc = plat_initialise_timer_ops(&plat_timer_info);
+ if (rc) {
+ ERROR("%s %d: No timer data found\n", __func__, __LINE__);
+ return rc;
+ }
+
+ /* Systems can't support single tick as a step value */
+ assert(TIMER_STEP_VALUE);
+
+ /* Initialise the array to max possible time */
+ for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+ interrupt_req_time[i] = INVALID_TIME;
+
+ /* Route timer expiry through the framework's dispatch handler */
+ tftf_irq_register_handler(TIMER_IRQ, tftf_timer_framework_handler);
+ arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
+ arm_gic_intr_enable(TIMER_IRQ);
+
+ /* Save the systicks per millisecond */
+ systicks_per_ms = read_cntfrq_el0() / 1000;
+
+ return 0;
+}
+
+/*
+ * Return the core position whose timer request expires first, or
+ * INVALID_CORE if no core has a pending request. When two cores
+ * requested the same expiry time, the lowest core number wins: the
+ * scan runs in ascending core order with a strict comparison.
+ */
+static inline unsigned int get_lowest_req_core(void)
+{
+        unsigned int winner = INVALID_CORE;
+        unsigned long long earliest = INVALID_TIME;
+        unsigned int core;
+
+        for (core = 0; core < PLATFORM_CORE_COUNT; core++) {
+                unsigned long long req = interrupt_req_time[core];
+
+                if (req < earliest) {
+                        earliest = req;
+                        winner = core;
+                }
+        }
+
+        return winner;
+}
+
+/*
+ * Request a timer interrupt on the calling core 'time_out_ms'
+ * milliseconds from now.
+ *
+ * The request is recorded in interrupt_req_time[] and, if it expires
+ * earlier than the currently programmed request by more than
+ * TIMER_STEP_VALUE (or no request is programmed at all), the hardware
+ * timer is reprogrammed and its interrupt retargeted to this core.
+ * Requests shorter than TIMER_STEP_VALUE are rounded up to it;
+ * requests of 0 or greater than MAX_TIME_OUT_MS are rejected.
+ *
+ * Returns 0 on success, -1 on an invalid timeout, or the driver error
+ * code if programming the peripheral failed.
+ */
+int tftf_program_timer(unsigned long time_out_ms)
+{
+        unsigned int core_pos;
+        unsigned long long current_time;
+        u_register_t flags;
+        int rc = 0;
+
+        /*
+         * Some timer implementations have very small maximum timeouts.
+         * If a request greater than the maximum supported by them were
+         * accepted, it would have to be broken down and remembered, or
+         * handled by some other technique. Since that use case is not
+         * intended and to keep the timer framework simple, timeout
+         * requests accepted by timer implementations can't be greater
+         * than 10 seconds. Hence, all timer peripherals used in the
+         * timer framework have to support a timeout with an interval
+         * of at least MAX_TIME_OUT_MS.
+         */
+        if ((time_out_ms > MAX_TIME_OUT_MS) || (time_out_ms == 0)) {
+                /*
+                 * Report the actual offending value: a zero timeout is
+                 * just as invalid as one above MAX_TIME_OUT_MS.
+                 */
+                ERROR("%s : Invalid timeout %lu ms\n", __func__,
+                        time_out_ms);
+                return -1;
+        } else if (time_out_ms < TIMER_STEP_VALUE) {
+                time_out_ms = TIMER_STEP_VALUE;
+        }
+
+        core_pos = platform_get_core_pos(read_mpidr_el1());
+        /* A timer interrupt request must not already be pending for this core */
+        assert(interrupt_req_time[core_pos] == INVALID_TIME);
+
+        /* Mask IRQs so the timer handler can't contend for timer_lock */
+        flags = read_daif();
+        disable_irq();
+        spin_lock(&timer_lock);
+
+        assert((current_prog_core < PLATFORM_CORE_COUNT) ||
+                (current_prog_core == INVALID_CORE));
+
+        /*
+         * Read time after acquiring timer_lock to account for any time taken
+         * by lock contention.
+         */
+        current_time = get_current_time_ms();
+
+        /* Update the requested time */
+        interrupt_req_time[core_pos] = current_time + time_out_ms;
+
+        VERBOSE("Need timer interrupt at: %lld current_prog_time:%lld\n"
+                " current time: %lld\n", interrupt_req_time[core_pos],
+                        get_current_prog_time(),
+                        get_current_time_ms());
+
+        /*
+         * If no request is currently programmed, or this request expires
+         * earlier than the programmed one by more than timer_step_value,
+         * program the timer with the requested time and retarget the
+         * timer interrupt to this core.
+         */
+        if ((!get_current_prog_time()) || (interrupt_req_time[core_pos] <
+                        (get_current_prog_time() - TIMER_STEP_VALUE))) {
+
+                arm_gic_set_intr_target(TIMER_IRQ, core_pos);
+
+                rc = PROGRAM_TIMER(time_out_ms);
+                /* We don't expect timer programming to fail */
+                if (rc)
+                        ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
+
+                current_prog_core = core_pos;
+        }
+
+        spin_unlock(&timer_lock);
+        /* Restore DAIF flags */
+        write_daif(flags);
+        isb();
+
+        return rc;
+}
+
+/*
+ * Program a wake-up timer for 'milli_secs' ms and then suspend the
+ * calling CPU with the given PSCI power state. IRQs are masked around
+ * the whole sequence: a timer IRQ that fires before the suspend entry
+ * stays pending and aborts the suspend instead of leaving the CPU
+ * asleep with no wake-up source, and a pending timer IRQ still wakes
+ * the CPU from suspend.
+ *
+ * The individual return codes are stored through 'timer_rc' and
+ * 'suspend_rc' when those pointers are non-NULL. Returns 0 if both
+ * steps succeeded, -1 otherwise.
+ */
+int tftf_program_timer_and_suspend(unsigned long milli_secs,
+                                   unsigned int pwr_state,
+                                   int *timer_rc, int *suspend_rc)
+{
+        int rc = 0;
+        int prog_ret = 0;
+        int susp_ret = PSCI_E_SUCCESS;
+        u_register_t saved_daif = read_daif();
+
+        /* Preserve DAIF flags. IRQs need to be disabled for this to work. */
+        disable_irq();
+
+        prog_ret = tftf_program_timer(milli_secs);
+        if (prog_ret != 0) {
+                rc = -1;
+                INFO("%s %d: timer_rc = %d\n", __func__, __LINE__, prog_ret);
+        } else {
+                susp_ret = tftf_cpu_suspend(pwr_state);
+                if (susp_ret != PSCI_E_SUCCESS) {
+                        rc = -1;
+                        INFO("%s %d: suspend_rc = %d\n", __func__, __LINE__,
+                                susp_ret);
+                }
+        }
+
+        /* Restore previous DAIF flags */
+        write_daif(saved_daif);
+        isb();
+
+        if (timer_rc != NULL)
+                *timer_rc = prog_ret;
+        if (suspend_rc != NULL)
+                *suspend_rc = susp_ret;
+
+        /*
+         * If IRQs were disabled when calling this function, the timer IRQ
+         * handler won't be called and the timer interrupt will be pending,
+         * but that isn't necessarily a problem.
+         */
+        return rc;
+}
+
+/*
+ * Program a wake-up timer for 'milli_secs' ms and then issue a PSCI
+ * SYSTEM_SUSPEND. IRQs are masked around the whole sequence: a timer
+ * IRQ that fires before suspend entry stays pending and aborts the
+ * suspend instead of leaving the system asleep with no wake-up source,
+ * and a pending timer IRQ still wakes the system from suspend.
+ *
+ * The individual return codes are stored through 'timer_rc' and
+ * 'suspend_rc' when those pointers are non-NULL. Returns 0 if both
+ * steps succeeded, -1 otherwise.
+ */
+int tftf_program_timer_and_sys_suspend(unsigned long milli_secs,
+                                       int *timer_rc, int *suspend_rc)
+{
+        int rc = 0;
+        int prog_ret = 0;
+        int susp_ret = PSCI_E_SUCCESS;
+        u_register_t saved_daif = read_daif();
+
+        /* Preserve DAIF flags. IRQs need to be disabled for this to work. */
+        disable_irq();
+
+        prog_ret = tftf_program_timer(milli_secs);
+        if (prog_ret != 0) {
+                rc = -1;
+                INFO("%s %d: timer_rc = %d\n", __func__, __LINE__, prog_ret);
+        } else {
+                susp_ret = tftf_system_suspend();
+                if (susp_ret != PSCI_E_SUCCESS) {
+                        rc = -1;
+                        INFO("%s %d: suspend_rc = %d\n", __func__, __LINE__,
+                                susp_ret);
+                }
+        }
+
+        /* Restore previous DAIF flags */
+        write_daif(saved_daif);
+        isb();
+
+        /*
+         * If IRQs were disabled when calling this function, the timer IRQ
+         * handler won't be called and the timer interrupt will be pending,
+         * but that isn't necessarily a problem.
+         */
+        if (timer_rc != NULL)
+                *timer_rc = prog_ret;
+        if (suspend_rc != NULL)
+                *suspend_rc = susp_ret;
+
+        return rc;
+}
+
+/*
+ * Put the calling CPU into standby for 'milli_secs' ms using a
+ * level-0 standby power state with a timer wake-up.
+ * Returns 0 on success, -1 on any failure.
+ */
+int tftf_timer_sleep(unsigned long milli_secs)
+{
+        uint32_t stateid;
+        int power_state;
+
+        if (tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+                        PSTATE_TYPE_STANDBY, &stateid) != PSCI_E_SUCCESS)
+                return -1;
+
+        power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+                        PSTATE_TYPE_STANDBY, stateid);
+
+        return (tftf_program_timer_and_suspend(milli_secs, power_state,
+                        NULL, NULL) == 0) ? 0 : -1;
+}
+
+/*
+ * Cancel the calling core's pending timer request.
+ *
+ * Clears this core's slot in interrupt_req_time[]. If the cancelled
+ * request is the one currently programmed in the hardware timer, the
+ * peripheral is cancelled, any already-pending timer IRQ is cleared at
+ * the GIC, and the timer is re-programmed and re-targeted for the next
+ * earliest requester, if any.
+ *
+ * Returns 0 on success, or the driver error code from the cancel or
+ * re-program operation.
+ */
+int tftf_cancel_timer(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned int next_timer_req_core_pos;
+ unsigned long long current_time;
+ u_register_t flags;
+ int rc = 0;
+
+ /*
+ * IRQ is disabled so that if a timer is fired after taking a lock,
+ * it will remain pending and a core does not hit IRQ handler trying
+ * to acquire an already locked spin_lock causing dead lock.
+ */
+ flags = read_daif();
+ disable_irq();
+ spin_lock(&timer_lock);
+
+ interrupt_req_time[core_pos] = INVALID_TIME;
+
+ /* Only touch the hardware if our request is the programmed one */
+ if (core_pos == current_prog_core) {
+ /*
+ * Cancel the programmed interrupt at the peripheral. If the
+ * timer interrupt is level triggered and fired this also
+ * deactivates the pending interrupt.
+ */
+ rc = plat_timer_info->cancel();
+ /* We don't expect cancel timer to fail */
+ if (rc) {
+ ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
+ goto exit;
+ }
+
+ /*
+ * For edge triggered interrupts, if an IRQ is fired before
+ * cancel timer is executed, the signal remains pending. So,
+ * clear the Timer IRQ if it is already pending.
+ */
+ if (arm_gic_is_intr_pending(TIMER_IRQ))
+ arm_gic_intr_clear(TIMER_IRQ);
+
+ /* Get next timer consumer */
+ next_timer_req_core_pos = get_lowest_req_core();
+ if (next_timer_req_core_pos != INVALID_CORE) {
+
+ /* Retarget to the next_timer_req_core_pos */
+ arm_gic_set_intr_target(TIMER_IRQ, next_timer_req_core_pos);
+ current_prog_core = next_timer_req_core_pos;
+
+ current_time = get_current_time_ms();
+
+ /*
+ * If the next timer request is lesser than or in a
+ * window of TIMER_STEP_VALUE from current time,
+ * program it to fire after TIMER_STEP_VALUE.
+ */
+ if (interrupt_req_time[next_timer_req_core_pos] >
+ current_time + TIMER_STEP_VALUE)
+ rc = PROGRAM_TIMER(interrupt_req_time[next_timer_req_core_pos] - current_time);
+ else
+ rc = PROGRAM_TIMER(TIMER_STEP_VALUE);
+ VERBOSE("Cancel and program new timer for core_pos: "
+ "%d %lld\n",
+ next_timer_req_core_pos,
+ get_current_prog_time());
+ /* We don't expect timer programming to fail */
+ if (rc)
+ ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
+ } else {
+ /* No other core is waiting: leave the timer idle */
+ current_prog_core = INVALID_CORE;
+ VERBOSE("Cancelling timer : %d\n", core_pos);
+ }
+ }
+exit:
+ spin_unlock(&timer_lock);
+
+ /* Restore DAIF flags */
+ write_daif(flags);
+ isb();
+
+ return rc;
+}
+
+/*
+ * Framework-level timer interrupt handler, registered for TIMER_IRQ by
+ * tftf_initialise_timer(). Runs in IRQ context on the core the
+ * interrupt was targeted to.
+ *
+ * Dispatches the expired core's registered handler, wakes (via
+ * IRQ_WAKE_SGI) every other core whose request falls within
+ * TIMER_STEP_VALUE of now, then re-programs and re-targets the
+ * hardware timer for the next earliest remaining request.
+ */
+int tftf_timer_framework_handler(void *data)
+{
+ unsigned int handler_core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned int next_timer_req_core_pos;
+ unsigned long long current_time;
+ int rc = 0;
+
+ assert(interrupt_req_time[handler_core_pos] != INVALID_TIME);
+ spin_lock(&timer_lock);
+
+ current_time = get_current_time_ms();
+ /* Check that the interrupt is targeted to this core */
+ assert(handler_core_pos == current_prog_core);
+
+ /* This core's request is now being serviced: clear its slot */
+ interrupt_req_time[handler_core_pos] = INVALID_TIME;
+
+ /* Execute the driver handler */
+ if (plat_timer_info->handler)
+ plat_timer_info->handler();
+
+ if (arm_gic_is_intr_pending(TIMER_IRQ)) {
+ /*
+ * We might never manage to acquire the printf lock here
+ * (because we are in ISR context) but we're gonna panic right
+ * after anyway so it doesn't really matter.
+ */
+ ERROR("Timer IRQ still pending. Fatal error.\n");
+ panic();
+ }
+
+ /*
+ * Execute the handler requested by the core, the handlers for the
+ * other cores will be executed as part of handling IRQ_WAKE_SGI.
+ */
+ if (timer_handler[handler_core_pos])
+ timer_handler[handler_core_pos](data);
+
+ /* Send interrupts to all the CPUS in the min time block */
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ if ((interrupt_req_time[i] <=
+ (current_time + TIMER_STEP_VALUE))) {
+ interrupt_req_time[i] = INVALID_TIME;
+ tftf_send_sgi(IRQ_WAKE_SGI, i);
+ }
+ }
+
+ /* Get the next lowest requested timer core and program it */
+ next_timer_req_core_pos = get_lowest_req_core();
+ if (next_timer_req_core_pos != INVALID_CORE) {
+ /* Check we have not exceeded the time for next core */
+ assert(interrupt_req_time[next_timer_req_core_pos] >
+ current_time);
+ arm_gic_set_intr_target(TIMER_IRQ, next_timer_req_core_pos);
+ rc = PROGRAM_TIMER(interrupt_req_time[next_timer_req_core_pos]
+ - current_time);
+ }
+ /* Update current program core to the newer one */
+ current_prog_core = next_timer_req_core_pos;
+
+ spin_unlock(&timer_lock);
+
+ return rc;
+}
+
+/*
+ * Register a per-core handler invoked when this core's timer request
+ * expires. The same handler is also registered for IRQ_WAKE_SGI, since
+ * the framework may deliver the wake-up through that SGI instead of the
+ * timer interrupt itself.
+ *
+ * Returns the result of the IRQ_WAKE_SGI registration (0 on success).
+ */
+int tftf_timer_register_handler(irq_handler_t irq_handler)
+{
+        int rc;
+        unsigned int my_core_pos = platform_get_core_pos(read_mpidr_el1());
+
+        /* No handler must already be installed for this core */
+        assert(timer_handler[my_core_pos] == NULL);
+        timer_handler[my_core_pos] = irq_handler;
+
+        rc = tftf_irq_register_handler(IRQ_WAKE_SGI, irq_handler);
+        assert(rc == 0);
+
+        return rc;
+}
+
+/*
+ * Unregister the calling core's timer handler, along with the matching
+ * IRQ_WAKE_SGI handler registered by tftf_timer_register_handler().
+ *
+ * Returns the result of the IRQ_WAKE_SGI unregistration (0 on success).
+ */
+int tftf_timer_unregister_handler(void)
+{
+        unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+        int ret;
+
+        /*
+         * Unregister the handler for IRQ_WAKE_SGI also
+         */
+        ret = tftf_irq_unregister_handler(IRQ_WAKE_SGI);
+        assert(!ret);
+        /* Validate a handler is registered */
+        assert(timer_handler[core_pos]);
+        /* Clear the function pointer with NULL rather than a bare 0 */
+        timer_handler[core_pos] = NULL;
+
+        return ret;
+}
+
+/* Return the platform timer interrupt number used by the framework. */
+unsigned int tftf_get_timer_irq(void)
+{
+        /* The timer framework must have been initialised first */
+        assert(TIMER_IRQ != 0);
+
+        return TIMER_IRQ;
+}
+
+/* Return the platform timer's step value (granularity), in ms. */
+unsigned int tftf_get_timer_step_value(void)
+{
+        unsigned int step = TIMER_STEP_VALUE;
+
+        /* A zero step means the framework is not initialised */
+        assert(step != 0);
+
+        return step;
+}
+
+/*
+ * There are 4 cases that could happen when a system is resuming from system
+ * suspend. The cases are:
+ * 1. The resumed core is the last core to power down and the
+ * timer interrupt was targeted to it. In this case, target the
+ * interrupt to our core and set the appropriate priority and enable it.
+ *
+ * 2. The resumed core was the last core to power down but the timer interrupt
+ * is targeted to another core because of timer request grouping within
+ * TIMER_STEP_VALUE. In this case, re-target the interrupt to our core
+ * and set the appropriate priority and enable it
+ *
+ * 3. The system suspend request was down-graded by firmware and the timer
+ * interrupt is targeted to another core which woke up first. In this case,
+ * that core will wake us up and the interrupt_req_time[] corresponding to
+ * our core will be cleared. In this case, no need to do anything as GIC
+ * state is preserved.
+ *
+ * 4. The system suspend is woken up by another external interrupt other
+ * than the timer framework interrupt. In this case, just enable the
+ * timer interrupt and set the correct priority at GICD.
+ */
+void tftf_timer_gic_state_restore(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ spin_lock(&timer_lock);
+
+ /* Common to all 4 cases: re-enable the timer IRQ at the GIC */
+ arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
+ arm_gic_intr_enable(TIMER_IRQ);
+
+ /* Check if the programmed core is the woken up core */
+ if (interrupt_req_time[core_pos] == INVALID_TIME) {
+ INFO("The programmed core is not the one woken up\n");
+ } else {
+ /* Cases 1 and 2: retarget the timer IRQ to this core */
+ current_prog_core = core_pos;
+ arm_gic_set_intr_target(TIMER_IRQ, core_pos);
+ }
+
+ spin_unlock(&timer_lock);
+}
+
diff --git a/tftf/tests/common/test_helpers.c b/tftf/tests/common/test_helpers.c
new file mode 100644
index 0000000..8fdfded
--- /dev/null
+++ b/tftf/tests/common/test_helpers.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/*
+ * Return 1 if every CPU other than the calling one is powered OFF
+ * (i.e. the system is ready for a system-suspend request), 0 otherwise.
+ */
+int is_sys_suspend_state_ready(void)
+{
+        unsigned int node;
+        u_register_t my_mpid = read_mpidr_el1() & MPID_MASK;
+
+        for_each_cpu(node) {
+                u_register_t mpid = tftf_get_mpidr_from_node(node);
+
+                /* The calling CPU is necessarily powered on: skip it */
+                if (mpid == my_mpid)
+                        continue;
+
+                if (tftf_psci_affinity_info(mpid, MPIDR_AFFLVL0) !=
+                    PSCI_STATE_OFF)
+                        return 0;
+        }
+
+        return 1;
+}
+
+/*
+ * Issue a PSCI SYSTEM_RESET call. On success this never returns; if it
+ * does return, the failure is reported on the test output.
+ */
+void psci_system_reset(void)
+{
+        smc_ret_values ret;
+        smc_args args = { SMC_PSCI_SYSTEM_RESET };
+
+        ret = tftf_smc(&args);
+
+        /* The PSCI SYSTEM_RESET call is not supposed to return */
+        tftf_testcase_printf("System didn't reboot properly (%d)\n",
+                             (unsigned int)ret.ret0);
+}
+
+/*
+ * Invoke PSCI MEM_PROTECT with 'val' as its argument and return the
+ * PSCI return code.
+ */
+int psci_mem_protect(int val)
+{
+        smc_args in = { SMC_PSCI_MEM_PROTECT };
+        smc_ret_values out;
+
+        in.arg1 = val;
+        out = tftf_smc(&in);
+
+        return out.ret0;
+}
+
+/*
+ * Invoke PSCI MEM_PROTECT_CHECK_RANGE on [addr, addr + size) and
+ * return the PSCI return code.
+ */
+int psci_mem_protect_check(uintptr_t addr, size_t size)
+{
+        smc_args in = { SMC_PSCI_MEM_PROTECT_CHECK };
+        smc_ret_values out;
+
+        in.arg1 = addr;
+        in.arg2 = size;
+        out = tftf_smc(&in);
+
+        return out.ret0;
+}
+
+/*
+ * This function returns an address that can be used as a sentinel for
+ * the mem_protect functions: an address inside a platform-protected
+ * region but outside the memory used by TFTF itself. Returns NULL if
+ * no suitable address is found.
+ */
+unsigned char *psci_mem_prot_get_sentinel(void)
+{
+ const mem_region_t *ranges, *rp, *lim;
+ int nranges;
+ /* Linker-defined bounds of the TFTF image */
+ IMPORT_SYM(uintptr_t, __TFTF_BASE__, tftf_base);
+ IMPORT_SYM(uintptr_t, __TFTF_END__, tftf_end);
+ uintptr_t p = 0;
+
+ ranges = plat_get_prot_regions(&nranges);
+ if (!ranges)
+ return NULL;
+
+ lim = &ranges[nranges];
+ for (rp = ranges ; rp < lim; rp++) {
+ /* Candidate: first byte of the region */
+ p = rp->addr;
+ if (p < tftf_base || p > tftf_end)
+ break;
+ /* Candidate: last byte of the region */
+ p = p + (rp->size - 1);
+ if (p < tftf_base || p > tftf_end)
+ break;
+ }
+
+ /*
+ * NOTE(review): the loop stops at the first region end point lying
+ * outside [tftf_base, tftf_end]; the region itself may still partly
+ * overlap TFTF, but the returned address does not - confirm this is
+ * sufficient for the mem_protect tests.
+ */
+ return (rp == lim) ? NULL : (unsigned char *) p;
+}
+
+/*
+ * Dynamically map the region described in 'args', run 'test' with
+ * args->arg, then unmap the region again. Returns TEST_RESULT_FAIL if
+ * either the mapping or the unmapping fails, otherwise the result of
+ * the test function.
+ */
+test_result_t map_test_unmap(const map_args_unmap_t *args,
+                             test_function_arg_t test)
+{
+        test_result_t result;
+        int rc;
+
+        /* Identity-map the region with the requested attributes */
+        rc = mmap_add_dynamic_region(args->addr, args->addr,
+                                     args->size, args->attr);
+        if (rc != 0) {
+                tftf_testcase_printf("Couldn't map memory (ret = %d)\n", rc);
+                return TEST_RESULT_FAIL;
+        }
+
+        result = (*test)(args->arg);
+
+        rc = mmap_remove_dynamic_region(args->addr, args->size);
+        if (rc != 0) {
+                tftf_testcase_printf("Couldn't unmap memory (ret = %d)\n", rc);
+                return TEST_RESULT_FAIL;
+        }
+
+        return result;
+}
diff --git a/tftf/tests/extensions/amu/test_amu.c b/tftf/tests/extensions/amu/test_amu.c
new file mode 100644
index 0000000..2e2ea6f
--- /dev/null
+++ b/tftf/tests/extensions/amu/test_amu.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <amu.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#define SUSPEND_TIME_1_SEC 1000
+
+static volatile int wakeup_irq_received[PLATFORM_CORE_COUNT];
+
+/* Dummy timer handler that sets a flag to check it has been called. */
+static int suspend_wakeup_handler(void *data)
+{
+        unsigned int pos =
+                platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+        /* The wake-up flag must not already be set for this core */
+        assert(wakeup_irq_received[pos] == 0);
+        wakeup_irq_received[pos] = 1;
+
+        return 0;
+}
+
+/*
+ * Helper function to suspend a CPU to power level 0 and wake it up with
+ * a timer. Returns TEST_RESULT_SUCCESS if the suspend/resume cycle
+ * completed and the wake-up IRQ was received, TEST_RESULT_FAIL
+ * otherwise.
+ */
+static test_result_t suspend_and_resume_this_cpu(void)
+{
+ uint32_t stateid;
+ int psci_ret;
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ /* Prepare wakeup timer. IRQs need to be enabled. */
+ wakeup_irq_received[core_pos] = 0;
+
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /*
+ * Program timer to fire interrupt after timer expires.
+ * NOTE(review): the return value of tftf_program_timer() is not
+ * checked; if programming failed the suspend below might have no
+ * wake-up source - confirm this is acceptable here.
+ */
+ tftf_program_timer(SUSPEND_TIME_1_SEC);
+
+ /*
+ * Suspend the calling CPU to power level 0 and power
+ * state.
+ */
+ psci_ret = tftf_psci_make_composite_state_id(PSTATE_AFF_LVL_0,
+ PSTATE_TYPE_POWERDOWN,
+ &stateid);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ mp_printf("Failed to make composite state ID @ CPU %d. rc = %x\n",
+ core_pos, psci_ret);
+ result = TEST_RESULT_FAIL;
+ } else {
+ unsigned int power_state = tftf_make_psci_pstate(PSTATE_AFF_LVL_0,
+ PSTATE_TYPE_POWERDOWN, stateid);
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (!wakeup_irq_received[core_pos]) {
+ mp_printf("Didn't receive wakeup IRQ in CPU %d.\n",
+ core_pos);
+ result = TEST_RESULT_FAIL;
+ }
+
+ if (psci_ret != PSCI_E_SUCCESS) {
+ mp_printf("Failed to suspend from CPU %d. ret: %x\n",
+ core_pos, psci_ret);
+ result = TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Wake up. Remove timer after waking up.*/
+ tftf_cancel_timer();
+ tftf_timer_unregister_handler();
+
+ return result;
+}
+
+/*
+ * Check that group0/group1 counters are non-zero. As EL3
+ * has enabled the counters before the first entry to NS world,
+ * the counters should have increased by the time we reach this
+ * test case.
+ */
+test_result_t test_amu_nonzero_ctr(void)
+{
+        int idx;
+
+        if (!amu_supported())
+                return TEST_RESULT_SKIPPED;
+
+        /* If counters are not enabled, then skip the test */
+        if (read_amcntenset0_el0() != AMU_GROUP0_COUNTERS_MASK ||
+            read_amcntenset1_el0() != AMU_GROUP1_COUNTERS_MASK)
+                return TEST_RESULT_SKIPPED;
+
+        /* Every group 0 counter must have ticked by now */
+        for (idx = 0; idx < AMU_GROUP0_NR_COUNTERS; idx++) {
+                if (amu_group0_cnt_read(idx) == 0) {
+                        tftf_testcase_printf("Group0 counter cannot be 0\n");
+                        return TEST_RESULT_FAIL;
+                }
+        }
+
+        /* Likewise for every group 1 counter */
+        for (idx = 0; idx < AMU_GROUP1_NR_COUNTERS; idx++) {
+                if (amu_group1_cnt_read(idx) == 0) {
+                        tftf_testcase_printf("Group1 counter cannot be 0\n");
+                        return TEST_RESULT_FAIL;
+                }
+        }
+
+        return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Check that the counters are non-decreasing during
+ * a suspend/resume cycle.
+ */
+test_result_t test_amu_suspend_resume(void)
+{
+        uint64_t group0_ctrs[AMU_GROUP0_MAX_NR_COUNTERS];
+        uint64_t group1_ctrs[AMU_GROUP1_MAX_NR_COUNTERS];
+        test_result_t suspend_result;
+        int i;
+
+        if (!amu_supported())
+                return TEST_RESULT_SKIPPED;
+
+        /* If counters are not enabled, then skip the test */
+        if (read_amcntenset0_el0() != AMU_GROUP0_COUNTERS_MASK ||
+            read_amcntenset1_el0() != AMU_GROUP1_COUNTERS_MASK)
+                return TEST_RESULT_SKIPPED;
+
+        /* Save counters values before suspend */
+        for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+                group0_ctrs[i] = amu_group0_cnt_read(i);
+
+        for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+                group1_ctrs[i] = amu_group1_cnt_read(i);
+
+        /*
+         * Suspend/resume current core. If the suspend/resume cycle
+         * itself failed, the EL3 AMU context save/restore path was not
+         * exercised and the comparison below would produce a false
+         * pass, so propagate the failure instead of ignoring it.
+         */
+        suspend_result = suspend_and_resume_this_cpu();
+        if (suspend_result != TEST_RESULT_SUCCESS)
+                return suspend_result;
+
+        /*
+         * Check if counter values are >= than the stored values.
+         * If they are not, the AMU context save/restore in EL3 is buggy.
+         */
+        for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++) {
+                uint64_t v = amu_group0_cnt_read(i);
+
+                if (v < group0_ctrs[i]) {
+                        tftf_testcase_printf("Invalid counter value: before: %llx, after: %llx\n",
+                                (unsigned long long)group0_ctrs[i],
+                                (unsigned long long)v);
+                        return TEST_RESULT_FAIL;
+                }
+        }
+
+        for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++) {
+                uint64_t v = amu_group1_cnt_read(i);
+
+                if (v < group1_ctrs[i]) {
+                        tftf_testcase_printf("Invalid counter value: before: %llx, after: %llx\n",
+                                (unsigned long long)group1_ctrs[i],
+                                (unsigned long long)v);
+                        return TEST_RESULT_FAIL;
+                }
+        }
+
+        return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/framework_validation_tests/test_timer_framework.c b/tftf/tests/framework_validation_tests/test_timer_framework.c
new file mode 100644
index 0000000..d7bd4f9
--- /dev/null
+++ b/tftf/tests/framework_validation_tests/test_timer_framework.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <irq.h>
+#include <mmio.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+
+/* Used to confirm the CPU is woken up by IRQ_WAKE_SGI or Timer IRQ */
+static volatile int requested_irq_received[PLATFORM_CORE_COUNT];
+/* Used to count number of CPUs woken up by IRQ_WAKE_SGI */
+static int multiple_timer_count;
+/* Used to count number of CPUs woken up by Timer IRQ */
+static int timer_switch_count;
+/* Timer step value of a platform */
+static unsigned int timer_step_value;
+/* Used to program the interrupt time */
+static unsigned long long next_int_time;
+/* Lock to prevent concurrently modifying next_int_time */
+static spinlock_t int_timer_access_lock;
+/* Lock to prevent concurrently modifying irq handler data structures */
+static spinlock_t irq_handler_lock;
+
+/* Variable to confirm all cores are inside the testcase */
+static volatile unsigned int all_cores_inside_test;
+
+/*
+ * Used by test cases to confirm if the programmed timer is fired. It also
+ * keeps track of how many timer IRQs are received.
+ */
+static int requested_irq_handler(void *data)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned int irq_id = *(unsigned int *) data;
+
+ assert(irq_id == IRQ_WAKE_SGI || irq_id == tftf_get_timer_irq());
+ assert(requested_irq_received[core_pos] == 0);
+
+ if (irq_id == tftf_get_timer_irq()) {
+ spin_lock(&irq_handler_lock);
+ timer_switch_count++;
+ spin_unlock(&irq_handler_lock);
+ }
+
+ requested_irq_received[core_pos] = 1;
+
+ return 0;
+}
+
+/*
+ * Used by test cases to confirm if the programmed timer is fired. It also
+ * keeps track of how many WAKE_SGIs are received.
+ */
+static int multiple_timer_handler(void *data)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned int irq_id = *(unsigned int *) data;
+
+ assert(irq_id == IRQ_WAKE_SGI || irq_id == tftf_get_timer_irq());
+ assert(requested_irq_received[core_pos] == 0);
+
+ if (irq_id == IRQ_WAKE_SGI) {
+ spin_lock(&irq_handler_lock);
+ multiple_timer_count++;
+ spin_unlock(&irq_handler_lock);
+ }
+
+ requested_irq_received[core_pos] = 1;
+
+ return 0;
+}
+
+/*
+ * @Test_Aim@ Validates timer interrupt framework and platform timer driver for
+ * generation and routing of interrupt to a powered on core.
+ *
+ * Returns SUCCESS or waits forever in wfi()
+ */
+test_result_t test_timer_framework_interrupt(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ int ret;
+
+ /* Initialise common variable across tests */
+ requested_irq_received[core_pos] = 0;
+
+ /* Register timer handler to confirm it received the timer interrupt */
+ ret = tftf_timer_register_handler(requested_irq_handler);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to register timer handler:0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = tftf_program_timer(tftf_get_timer_step_value() + 1);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to program timer:0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+ wfi();
+
+ while (!requested_irq_received[core_pos])
+ ;
+ ret = tftf_timer_unregister_handler();
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to unregister timer handler:0x%x\n", ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t timer_target_power_down_cpu(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned int power_state;
+ unsigned int stateid;
+ int ret;
+ unsigned long timer_delay;
+
+ tftf_send_event(&cpu_ready[core_pos]);
+ /* Initialise common variable across tests */
+ requested_irq_received[core_pos] = 0;
+
+ /* Construct the state-id for power down */
+ ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+ PSTATE_TYPE_POWERDOWN, &stateid);
+ if (ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to construct composite state\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Register timer handler to confirm it received the timer interrupt */
+ ret = tftf_timer_register_handler(requested_irq_handler);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to register timer handler:0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait for all cores to be up */
+ while (!all_cores_inside_test)
+ ;
+
+ power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+ PSTATE_TYPE_POWERDOWN, stateid);
+
+ spin_lock(&int_timer_access_lock);
+ timer_delay = PLAT_SUSPEND_ENTRY_TIME + next_int_time;
+ next_int_time -= 2 * (timer_step_value + PLAT_SUSPEND_ENTRY_EXIT_TIME);
+ spin_unlock(&int_timer_access_lock);
+
+ ret = tftf_program_timer_and_suspend(timer_delay, power_state,
+ NULL, NULL);
+ if (ret != 0) {
+ tftf_testcase_printf(
+ "Failed to program timer or suspend CPU: 0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ while (!requested_irq_received[core_pos])
+ ;
+ ret = tftf_timer_unregister_handler();
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to unregister timer handler:0x%x\n", ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validates routing of timer interrupt to the lowest requested
+ * timer interrupt core on power down.
+ *
+ * Power up all the cores; each one requests a timer interrupt smaller than
+ * the previous core's request by the timer step value. Doing this ensures
+ * that at least some cores will be woken by the timer IRQ.
+ *
+ * Returns SUCCESS if all cores power up on getting the interrupt.
+ */
+test_result_t test_timer_target_power_down_cpu(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_mpid, cpu_node;
+ unsigned int core_pos;
+ unsigned int rc;
+ unsigned int valid_cpu_count;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++)
+ tftf_init_event(&cpu_ready[i]);
+
+ if (!timer_step_value)
+ timer_step_value = tftf_get_timer_step_value();
+
+ timer_switch_count = 0;
+ all_cores_inside_test = 0;
+
+ /*
+	 * To ensure that none of the CPUs falls in an atomic time slice,
+	 * all CPUs program the timer as close as possible with a time
+ * difference of twice the sum of step value and suspend entry
+ * exit time.
+ */
+ next_int_time = 2 * (timer_step_value + PLAT_SUSPEND_ENTRY_EXIT_TIME) * (PLATFORM_CORE_COUNT + 2);
+
+ /*
+ * Preparation step: Power on all cores.
+ */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ rc = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) timer_target_power_down_cpu,
+ 0);
+ if (rc != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ cpu_mpid, rc);
+ return TEST_RESULT_SKIPPED;
+ }
+ }
+
+ /* Wait for all non-lead CPUs to be ready */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ all_cores_inside_test = 1;
+
+ rc = timer_target_power_down_cpu();
+
+ valid_cpu_count = 0;
+ /* Wait for all cores to complete the test */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ core_pos = platform_get_core_pos(cpu_mpid);
+ while (!requested_irq_received[core_pos])
+ ;
+ valid_cpu_count++;
+ }
+
+ if (timer_switch_count != valid_cpu_count) {
+ tftf_testcase_printf("Expected timer switch: %d Actual: %d\n",
+ valid_cpu_count, timer_switch_count);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t timer_same_interval(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned int power_state;
+ unsigned int stateid;
+ int ret;
+
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ /* Initialise common variable across tests */
+ requested_irq_received[core_pos] = 0;
+
+ /* Construct the state-id for power down */
+ ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+ PSTATE_TYPE_POWERDOWN, &stateid);
+ if (ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to construct composite state\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Register timer handler to confirm it received the timer interrupt */
+ ret = tftf_timer_register_handler(multiple_timer_handler);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to register timer handler:0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait for all cores to be up */
+ while (!all_cores_inside_test)
+ ;
+
+	/*
+	 * Hopefully, within suspend entry time + 10 ms, at least some of the
+	 * CPUs will request the same interval.
+	 */
+ power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+ PSTATE_TYPE_POWERDOWN, stateid);
+ ret = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME + 10,
+ power_state, NULL, NULL);
+ if (ret != 0) {
+ tftf_testcase_printf(
+ "Failed to program timer or suspend CPU: 0x%x\n", ret);
+ }
+
+ while (!requested_irq_received[core_pos])
+ ;
+ ret = tftf_timer_unregister_handler();
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to unregister timer handler:0x%x\n", ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validates routing of timer interrupt when multiple cores
+ * request the same time.
+ *
+ * Power up all the cores and each core requests the same time.
+ *
+ * Returns SUCCESS if all cores get an interrupt and power up.
+ */
+test_result_t test_timer_target_multiple_same_interval(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_mpid, cpu_node;
+ unsigned int core_pos;
+ unsigned int rc;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++)
+ tftf_init_event(&cpu_ready[i]);
+
+ multiple_timer_count = 0;
+ all_cores_inside_test = 0;
+
+ /*
+ * Preparation step: Power on all cores.
+ */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ rc = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) timer_same_interval,
+ 0);
+ if (rc != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ cpu_mpid, rc);
+ return TEST_RESULT_SKIPPED;
+ }
+ }
+
+ /* Wait for all non-lead CPUs to be ready */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ core_pos = platform_get_core_pos(lead_mpid);
+ /* Initialise common variable across tests */
+ requested_irq_received[core_pos] = 0;
+
+ all_cores_inside_test = 1;
+
+ rc = timer_same_interval();
+
+ /* Wait for all cores to complete the test */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ core_pos = platform_get_core_pos(cpu_mpid);
+ while (!requested_irq_received[core_pos])
+ ;
+ }
+
+ if (rc != TEST_RESULT_SUCCESS)
+ return rc;
+
+ /* At least 2 CPUs requests should fall in same timer period. */
+ return multiple_timer_count ? TEST_RESULT_SUCCESS : TEST_RESULT_SKIPPED;
+}
+
+static test_result_t do_stress_test(void)
+{
+ unsigned int power_state;
+ unsigned int stateid;
+ unsigned int timer_int_interval;
+ unsigned int verify_cancel;
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned long long end_time;
+ unsigned long long current_time;
+ int ret;
+
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ end_time = mmio_read_64(SYS_CNT_BASE1 + CNTPCT_LO) + read_cntfrq_el0() * 10;
+
+ /* Construct the state-id for power down */
+ ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+ PSTATE_TYPE_POWERDOWN, &stateid);
+ if (ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to construct composite state\n");
+ return TEST_RESULT_FAIL;
+ }
+
+	/* Register a handler to confirm it is woken by the programmed interrupt */
+ ret = tftf_timer_register_handler(requested_irq_handler);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to register timer handler:0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ do {
+ current_time = mmio_read_64(SYS_CNT_BASE1 + CNTPCT_LO);
+ if (current_time > end_time)
+ break;
+
+ timer_int_interval = 1 + rand() % 5;
+ verify_cancel = rand() % 5;
+
+ requested_irq_received[core_pos] = 0;
+
+ /*
+ * If verify_cancel == 0, cancel the programmed timer. As it can
+ * take values from 0 to 4, we will be cancelling only 20% of
+ * times.
+ */
+ if (!verify_cancel) {
+ ret = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME +
+ timer_int_interval);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to program timer: "
+ "0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = tftf_cancel_timer();
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to cancel timer: "
+ "0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+ } else {
+ power_state = tftf_make_psci_pstate(
+ MPIDR_AFFLVL0, PSTATE_TYPE_POWERDOWN,
+ stateid);
+
+ ret = tftf_program_timer_and_suspend(
+ PLAT_SUSPEND_ENTRY_TIME + timer_int_interval,
+ power_state, NULL, NULL);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to program timer "
+ "or suspend: 0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!requested_irq_received[core_pos]) {
+ /*
+ * Cancel the interrupt as the CPU has been
+ * woken by some other interrupt
+ */
+ ret = tftf_cancel_timer();
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to cancel timer:0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+ }
+ }
+ } while (1);
+
+ ret = tftf_timer_unregister_handler();
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to unregister timer handler:0x%x\n", ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Stress tests timer framework by requesting combination of
+ * timer requests with SUSPEND and cancel calls.
+ *
+ * Returns SUCCESS if all the cores successfully wake up from suspend
+ * and return to the framework.
+ */
+test_result_t stress_test_timer_framework(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_mpid, cpu_node;
+ unsigned int core_pos;
+ unsigned int rc;
+
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ tftf_init_event(&cpu_ready[i]);
+ requested_irq_received[i] = 0;
+ }
+ /*
+ * Preparation step: Power on all cores.
+ */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ rc = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) do_stress_test,
+ 0);
+ if (rc != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ cpu_mpid, rc);
+ return TEST_RESULT_SKIPPED;
+ }
+ }
+
+ /* Wait for all non-lead CPUs to be ready */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ return do_stress_test();
+}
diff --git a/tftf/tests/framework_validation_tests/test_validation_events.c b/tftf/tests/framework_validation_tests/test_validation_events.c
new file mode 100644
index 0000000..e229b17
--- /dev/null
+++ b/tftf/tests/framework_validation_tests/test_validation_events.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <tftf_lib.h>
+
+/* Events structures used by this test case */
+static event_t lead_cpu_event;
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+static event_t test_is_finished;
+
+static test_result_t non_lead_cpu_fn(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Signal to the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+ tftf_wait_for_event(&lead_cpu_event);
+
+ /*
+ * Wait for lead CPU's signal before exiting the test.
+ * Introduce a delay so that the lead CPU will send the event before the
+ * non-lead CPUs wait for it.
+ */
+ waitms(500);
+ tftf_wait_for_event(&test_is_finished);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validate the events API
+ *
+ * This test exercises the events API.
+ * - It creates a sequence of events sending and receiving. The order of
+ * operations is ensured by inserting delays at strategic points.
+ * - It tests the communication in both directions (i.e. from CPUx to CPUy and
+ * vice versa).
+ * - It tests that it doesn't matter whether CPUx waits for the event first
+ * then CPUy sends the event, or that things happen in the other order.
+ * - It tests the API on a single CPU.
+ *
+ * This test is skipped if an error occurs during the bring-up of non-lead CPUs.
+ * Otherwise, this test always returns success. If something goes wrong, the
+ * test will most probably hang because the system will go into a WFE/SEV dead
+ * lock.
+ */
+test_result_t test_validation_events(void)
+{
+ unsigned int lead_cpu;
+ unsigned int cpu_mpid;
+ unsigned int cpu_node;
+ unsigned int core_pos;
+ unsigned int cpus_count;
+ int psci_ret;
+
+ lead_cpu = read_mpidr_el1() & MPID_MASK;
+
+ /*
+ * The events API should work on a single CPU, provided that the event
+ * is sent before we wait for it. If we do things the other way around,
+ * the CPU will end up stuck in WFE state.
+ */
+ tftf_send_event(&lead_cpu_event);
+ tftf_wait_for_event(&lead_cpu_event);
+
+ /* Re-init lead_cpu_event to be able to reuse it */
+ tftf_init_event(&lead_cpu_event);
+
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_cpu)
+ continue;
+
+ psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t) non_lead_cpu_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ cpu_mpid, psci_ret);
+ return TEST_RESULT_SKIPPED;
+ }
+ }
+
+ /* Wait for all CPUs to have entered the test */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (cpu_mpid == lead_cpu)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+ }
+
+ /*
+ * Introduce a delay so that the non-lead CPUs will wait for this event
+ * before the lead CPU sends it.
+ */
+ waitms(500);
+ /* Send the event to half of the CPUs */
+ cpus_count = PLATFORM_CORE_COUNT / 2;
+ tftf_send_event_to(&lead_cpu_event, cpus_count);
+ waitms(500);
+ /* Send the event to the other half of the CPUs */
+ tftf_send_event_to(&lead_cpu_event, PLATFORM_CORE_COUNT - cpus_count);
+
+ /* Signal termination of the test to all CPUs */
+ tftf_send_event_to_all(&test_is_finished);
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/framework_validation_tests/test_validation_irq.c b/tftf/tests/framework_validation_tests/test_validation_irq.c
new file mode 100644
index 0000000..2e9e373
--- /dev/null
+++ b/tftf/tests/framework_validation_tests/test_validation_irq.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <irq.h>
+#include <platform.h>
+#include <sgi.h>
+#include <tftf_lib.h>
+
+static volatile unsigned int counter;
+
+/*
+ * IRQ handler for SGI #0.
+ * Increment the test counter to prove it's been successfully called.
+ */
+static int increment_counter(void *data)
+{
+ counter++;
+ return 0;
+}
+
+#if !DEBUG
+static int set_counter_to_42(void *data)
+{
+ counter = 42;
+ return 0;
+}
+#endif
+
+/*
+ * @Test_Aim@ Test IRQ handling on lead CPU
+ *
+ * Check that IRQ enabling/disabling and IRQ handler registering/unregistering
+ * work as expected on the lead CPU.
+ */
+test_result_t test_validation_irq(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ const unsigned int sgi_id = IRQ_NS_SGI_0;
+ int ret;
+
+ counter = 0;
+
+ /* Now register a handler */
+ ret = tftf_irq_register_handler(sgi_id, increment_counter);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to register initial IRQ handler\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ tftf_irq_enable(sgi_id, GIC_HIGHEST_NS_PRIORITY);
+
+ /*
+ * Send the SGI to the calling core and check the IRQ handler has been
+ * successfully called
+ */
+ tftf_send_sgi(sgi_id, core_pos);
+
+ /* Wait till the handler is executed */
+ while (counter != 1)
+ ;
+
+ /*
+ * Try to overwrite the IRQ handler. This should fail.
+ * In debug builds, it would trigger an assertion so we can't test that
+ * as it will stop the test session.
+ * In release builds though, it should just do nothing, i.e. it won't
+ * replace the existing handler and that's something that can be tested.
+ */
+#if !DEBUG
+ ret = tftf_irq_register_handler(sgi_id, set_counter_to_42);
+ if (ret == 0) {
+ tftf_testcase_printf(
+ "Overwriting the IRQ handler should have failed\n");
+ return TEST_RESULT_FAIL;
+ }
+#endif
+
+ tftf_send_sgi(sgi_id, core_pos);
+ while (counter != 2)
+ ;
+
+ /* Unregister the IRQ handler */
+ ret = tftf_irq_unregister_handler(sgi_id);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to unregister IRQ handler\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send the SGI to the calling core and check the former IRQ handler
+ * has not been called, now that it has been unregistered.
+ */
+ tftf_send_sgi(sgi_id, core_pos);
+
+ /*
+	 * Wait for some time so that the SGI interrupts the processor. It
+	 * normally takes a small but finite time for the IRQ to reach the processor.
+ */
+ waitms(500);
+
+ if (counter != 2) {
+ tftf_testcase_printf(
+ "IRQ handler hasn't been successfully unregistered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Try to unregister the IRQ handler again. This should fail.
+ * In debug builds, it would trigger an assertion so we can't test that
+ * as it will stop the test session.
+ * In release builds though, it should just do nothing.
+ */
+#if !DEBUG
+ ret = tftf_irq_unregister_handler(sgi_id);
+ if (ret == 0) {
+ tftf_testcase_printf(
+ "Unregistering the IRQ handler again should have failed\n");
+ return TEST_RESULT_FAIL;
+ }
+#endif
+
+ tftf_irq_disable(sgi_id);
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/framework_validation_tests/test_validation_nvm.c b/tftf/tests/framework_validation_tests/test_validation_nvm.c
new file mode 100644
index 0000000..7a10e19
--- /dev/null
+++ b/tftf/tests/framework_validation_tests/test_validation_nvm.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <nvm.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <tftf_lib.h>
+
+#define PER_CPU_BUFFER_OFFSET 0x08
+
+/* Events to specify activity to lead cpu */
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+static event_t test_done[PLATFORM_CORE_COUNT];
+
+/* Used to make concurrent access to flash by all the cores */
+static volatile int cpu_concurrent_write;
+
+/*
+ * @Test_Aim@ Test Non-Volatile Memory support
+ *
+ * Try reading/writing data from/to NVM to check that basic NVM support is
+ * working as expected.
+ */
+test_result_t test_validation_nvm(void)
+{
+ STATUS status;
+ unsigned test_value1 = 0x12345678;
+ unsigned test_value2 = 0;
+
+ /* Write a value in NVM */
+ status = tftf_nvm_write(TFTF_STATE_OFFSET(testcase_buffer),
+ &test_value1, sizeof(test_value1));
+ if (status != STATUS_SUCCESS) {
+ tftf_testcase_printf("tftf_nvm_write: error %d\n", status);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Read it back from NVM */
+ status = tftf_nvm_read(TFTF_STATE_OFFSET(testcase_buffer),
+ &test_value2, sizeof(test_value2));
+ if (status != STATUS_SUCCESS) {
+ tftf_testcase_printf("tftf_nvm_read: error (%d)\n", status);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check that the 2 values match */
+ if (test_value1 != test_value2) {
+ tftf_testcase_printf("Values mismatch: %u != %u\n",
+ test_value1, test_value2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Odd CPUs write to the flash and even CPUs read from it */
+static unsigned int access_flash_concurrent(unsigned int core_pos)
+{
+ unsigned int ret;
+ unsigned int test_value;
+
+ if (core_pos % 2) {
+ ret = tftf_nvm_write(TFTF_STATE_OFFSET(testcase_buffer)
+ + core_pos * PER_CPU_BUFFER_OFFSET,
+ &core_pos, sizeof(core_pos));
+ if (ret != STATUS_SUCCESS) {
+ tftf_testcase_printf("Write failed\n");
+ return TEST_RESULT_FAIL;
+ }
+ } else {
+ /* Dummy read */
+ ret = tftf_nvm_read(TFTF_STATE_OFFSET(testcase_buffer),
+ &test_value, sizeof(test_value));
+ if (ret != STATUS_SUCCESS) {
+ tftf_testcase_printf("Read failed\n");
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test concurrent memory access to Non-Volatile Memory
+ *
+ * Try reading/writing data from multiple cores to NVM and verify that the
+ * operations are serialised and that the device does not crash.
+ *
+ */
+static test_result_t test_validate_nvm_secondary(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ unsigned int ret;
+
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ /* Wait until all cores are ready to access simultaneously */
+ while (!cpu_concurrent_write)
+ ;
+
+ ret = access_flash_concurrent(core_pos);
+
+ tftf_send_event(&test_done[core_pos]);
+
+ return ret;
+}
+
+/*
+ * @Test_Aim@ Test serialisation of access by multiple CPU's
+ *
+ * Try reading/writing data to flash from all the CPUs with as much
+ * concurrency as possible. Check that the device does not hang and that
+ * the updates to flash happened as expected.
+ */
+test_result_t test_validate_nvm_serialisation(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int target_mpid, target_node;
+ unsigned int core_pos;
+ unsigned int lead_core_pos;
+ unsigned int test_value;
+ unsigned int ret;
+ int rc;
+ char init_buffer[TEST_BUFFER_SIZE] = {0};
+
+ /* Initialise the scratch flash */
+ ret = tftf_nvm_write(TFTF_STATE_OFFSET(testcase_buffer),
+ &init_buffer,
+ sizeof(init_buffer));
+ if (ret != STATUS_SUCCESS) {
+ tftf_testcase_printf("Write failed\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Power on all the cores */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU as it is already on */
+ if (target_mpid == lead_mpid)
+ continue;
+ rc = tftf_cpu_on(target_mpid,
+ (uintptr_t) test_validate_nvm_secondary,
+ 0);
+ if (rc != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d)\n",
+ target_mpid, rc);
+ return TEST_RESULT_SKIPPED;
+ }
+ }
+
+ /* Wait for all non-lead CPU's to be ready */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ lead_core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ /*
+ * Send event to all CPU's so that we can have as much concurrent
+ * access to flash as possible
+ */
+ cpu_concurrent_write = 1;
+
+ ret = access_flash_concurrent(lead_core_pos);
+
+ if (ret != TEST_RESULT_SUCCESS)
+ return ret;
+
+ /* Wait for all non-lead CPU's to complete the test */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_wait_for_event(&test_done[core_pos]);
+ }
+
+ /* Validate the results */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+
+ core_pos = platform_get_core_pos(target_mpid);
+
+ tftf_nvm_read(TFTF_STATE_OFFSET(testcase_buffer) +
+ core_pos * PER_CPU_BUFFER_OFFSET,
+ &test_value,
+ sizeof(test_value));
+
+ if ((core_pos % 2) && (test_value != core_pos)) {
+ tftf_testcase_printf("Concurrent flash access test "
+ "failed on cpu index: %d test_value:%d \n",
+ core_pos, test_value);
+ return TEST_RESULT_FAIL;
+ } else if (((core_pos % 2) == 0) && (test_value != 0)) {
+ tftf_testcase_printf("Concurrent flash access test "
+ "failed on cpu index: %d test_value:%d \n",
+ core_pos, test_value);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/framework_validation_tests/test_validation_sgi.c b/tftf/tests/framework_validation_tests/test_validation_sgi.c
new file mode 100644
index 0000000..806bc58
--- /dev/null
+++ b/tftf/tests/framework_validation_tests/test_validation_sgi.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <irq.h>
+#include <platform.h>
+#include <sgi.h>
+#include <tftf_lib.h>
+
+/*
+ * The 2 following global variables are used by the SGI handler to return
+ * information to the main test function.
+ */
+static sgi_data_t sgi_data;
+
+/* Flag to indicate whether the SGI has been handled */
+static volatile unsigned int sgi_handled;
+
+static int sgi_handler(void *data)
+{
+ /* Save SGI data */
+ sgi_data = *(sgi_data_t *) data;
+ sgi_handled = 1;
+
+ /* Return value doesn't matter */
+ return 0;
+}
+
+/*
+ * @Test_Aim@ Test SGI support on lead CPU
+ *
+ * 1) Register a local IRQ handler for SGI 0.
+ * 2) Send SGI 0 to the calling core, i.e. the lead CPU.
+ * 3) Check the correctness of the data received in the IRQ handler.
+ *
+ * TODO: Improve this test by sending SGIs to all cores in the system.
+ * This will ensure that IRQs are correctly configured on all cores.
+ */
+test_result_t test_validation_sgi(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ const unsigned int sgi_id = IRQ_NS_SGI_0;
+ test_result_t test_res = TEST_RESULT_SUCCESS;
+ int ret;
+
+ /* Register the local IRQ handler for the SGI */
+ ret = tftf_irq_register_handler(sgi_id, sgi_handler);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to register IRQ %u (%d)",
+ sgi_id, ret);
+ return TEST_RESULT_FAIL;
+ }
+ tftf_irq_enable(sgi_id, GIC_HIGHEST_NS_PRIORITY);
+
+ /* Send the SGI to the lead CPU */
+ tftf_send_sgi(sgi_id, core_pos);
+
+ /*
+ * Wait for the SGI to be handled.
+ * The SGI handler will update a global variable to reflect that.
+ */
+ while (sgi_handled == 0)
+ continue;
+
+ /* Verify the data received in the SGI handler */
+ if (sgi_data.irq_id != sgi_id) {
+ tftf_testcase_printf("Wrong IRQ ID, expected %u, got %u\n",
+ sgi_id, sgi_data.irq_id);
+ test_res = TEST_RESULT_FAIL;
+ }
+
+ tftf_irq_disable(sgi_id);
+ tftf_irq_unregister_handler(sgi_id);
+
+ return test_res;
+}
diff --git a/tftf/tests/fwu_tests/test_fwu_auth.c b/tftf/tests/fwu_tests/test_fwu_auth.c
new file mode 100644
index 0000000..ac7d71f
--- /dev/null
+++ b/tftf/tests/fwu_tests/test_fwu_auth.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <fwu_nvm.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <psci.h>
+#include <smccc.h>
+#include <status.h>
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Validate the FWU AUTH failure case.
+ * The Firmware Update feature implemented in Trusted Firmware-A
+ * code needs to be tested to check if FWU process gets started
+ * when watchdog resets the system due to Authentication
+ * failure of an image in BL1/BL2 stage.
+ * Test SUCCESS in case Firmware Update was done.
+ * Test FAIL in case Firmware Update was not done.
+ *
+ * The test runs in two phases separated by a reboot:
+ * - phase 1 (cold run): corrupt the FIP in flash, record the backup FIP
+ * address for the FWU process, then reset the platform;
+ * - phase 2 (after reboot): check that the FWU process replaced the
+ * corrupted FIP, as indicated by the "update done" flag in NVM.
+ */
+test_result_t test_fwu_auth(void)
+{
+ STATUS status;
+ unsigned int flag;
+ smc_args args = { SMC_PSCI_SYSTEM_RESET };
+ smc_ret_values ret = {0};
+
+ if (tftf_is_rebooted()) {
+ /*
+ * Check if the FIP update is done.
+ */
+ status = fwu_nvm_read(FWU_TFTF_TESTCASE_BUFFER_OFFSET, &flag, 4);
+ if (status != STATUS_SUCCESS) {
+ tftf_testcase_printf("Failed to read NVM (%d)\n", status);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * NOTE(review): this assumes the FWU process overwrites the
+ * backup-address value written below with
+ * FIP_IMAGE_UPDATE_DONE_FLAG on success — confirm against the
+ * platform FWU implementation.
+ */
+ if (flag != FIP_IMAGE_UPDATE_DONE_FLAG) {
+ tftf_testcase_printf("FIP was not updated\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+ }
+
+ /*
+ * Corrupt the flash offset for authentication failure.
+ */
+ flag = 0xdeadbeef;
+ status = fwu_nvm_write(FIP_CORRUPT_OFFSET, &flag, 4);
+ if (status != STATUS_SUCCESS) {
+ tftf_testcase_printf("Failed to corrupt FIP (%d)\n", status);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /*
+ * Provide the backup FIP address.
+ */
+ flag = FIP_BKP_ADDRESS;
+ status = fwu_nvm_write(FWU_TFTF_TESTCASE_BUFFER_OFFSET, &flag, 4);
+ if (status != STATUS_SUCCESS) {
+ tftf_testcase_printf("Failed to update backup FIP address (%d)\n",
+ status);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Notify that we are rebooting now. */
+ tftf_notify_reboot();
+
+ /* Request PSCI system reset. */
+ ret = tftf_smc(&args);
+
+ /* The PSCI SYSTEM_RESET call is not supposed to return */
+ tftf_testcase_printf("System didn't reboot properly (%d)\n",
+ (unsigned int)ret.ret0);
+
+ return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/fwu_tests/test_fwu_toc.c b/tftf/tests/fwu_tests/test_fwu_toc.c
new file mode 100644
index 0000000..6d220aa
--- /dev/null
+++ b/tftf/tests/fwu_tests/test_fwu_toc.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <firmware_image_package.h>
+#include <fwu_nvm.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <psci.h>
+#include <smccc.h>
+#include <status.h>
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Validate the FWU ToC invalid case.
+ * The Firmware Update feature implemented in Trusted Firmware-A
+ * code needs to be tested to check if FWU process gets started
+ * or not when the ToC header value in fip.bin is invalid.
+ * Test SUCCESS in case ToC is found valid.
+ * Test FAIL in case ToC is found invalid.
+ *
+ * Two-phase test separated by a reboot:
+ * - phase 1 (cold run): overwrite the ToC header (offset 0 of the FIP)
+ * with a bogus value and reset the platform;
+ * - phase 2 (after reboot): check that the FWU process restored a valid
+ * ToC header.
+ */
+test_result_t test_fwu_toc(void)
+{
+ STATUS status;
+ unsigned int toc_header;
+ smc_args args = { SMC_PSCI_SYSTEM_RESET };
+ smc_ret_values ret = {0};
+
+ if (tftf_is_rebooted()) {
+ /*
+ * Check whether we successfully resumed from the
+ * Firmware Update process. If we have, then the
+ * ToC header value will have been repaired.
+ */
+ status = fwu_nvm_read(0, &toc_header, 4);
+ if (status != STATUS_SUCCESS) {
+ tftf_testcase_printf("Failed to read NVM (%d)\n", status);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (toc_header != TOC_HEADER_NAME) {
+ tftf_testcase_printf("ToC is Invalid (%u)\n", toc_header);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+ }
+
+ /* Corrupt the TOC in fip.bin. */
+ toc_header = 0xdeadbeef;
+ status = fwu_nvm_write(0, &toc_header, 4);
+ if (status != STATUS_SUCCESS) {
+ tftf_testcase_printf("Failed to overwrite the ToC header (%d)\n",
+ status);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Notify that we are rebooting now. */
+ tftf_notify_reboot();
+
+ /* Request PSCI system reset. */
+ ret = tftf_smc(&args);
+
+ /* The PSCI SYSTEM_RESET call is not supposed to return */
+ tftf_testcase_printf("System didn't reboot properly (%d)\n",
+ (unsigned int)ret.ret0);
+
+ return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/misc_tests/boot_req_tests/test_cntfrq.c b/tftf/tests/misc_tests/boot_req_tests/test_cntfrq.c
new file mode 100644
index 0000000..78289bb
--- /dev/null
+++ b/tftf/tests/misc_tests/boot_req_tests/test_cntfrq.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mmio.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/*
+ * Compare the calling CPU's CNTFRQ_EL0 system register against the CNTFRQ
+ * value exposed by the memory-mapped Non-secure timer frame. The firmware
+ * is expected to have initialized both to the same value.
+ * Returns TEST_RESULT_SUCCESS if they match, TEST_RESULT_FAIL otherwise.
+ */
+static test_result_t cntfrq_check(void)
+{
+ u_register_t cntfrq_el0, ns_cntfrq;
+ cntfrq_el0 = read_cntfrq_el0();
+
+ ns_cntfrq = mmio_read_32(SYS_CNT_BASE1 + CNTBASEN_CNTFRQ);
+
+ if (cntfrq_el0 != ns_cntfrq) {
+ /* Fixed: the message previously ended in "/n" (typo'd escape) */
+ tftf_testcase_printf("CNTFRQ read from sys_reg = %llx and NS timer = %llx differs\n",
+ (unsigned long long)cntfrq_el0,
+ (unsigned long long)ns_cntfrq);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * The ARM ARM says that the cntfrq_el0, cntfrq memory mapped register and
+ * the RO views in NS timer frames must all be initialized by the firmware.
+ * (See I3.6.7 and D7.5.1 section in ARM ARM).
+ * This tests the same on all the CPUs in the system.
+ * Each secondary CPU is powered on with cntfrq_check() as its entry point
+ * and powers itself back down when it returns; the lead CPU runs the same
+ * check directly and then waits for all secondaries to turn off.
+ * Returns:
+ * TEST_RESULT_SUCCESS: if all the cntfrq values match
+ * TEST_RESULT_FAIL: if any of the cntfrq value mismatch
+ */
+test_result_t test_cntfrq_check(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_node, cpu_mpid;
+ int rc;
+
+ /* Bring every CPU online */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ rc = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) cntfrq_check,
+ 0);
+ if (rc != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ cpu_mpid, rc);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Run the check on the lead CPU; this is the value returned below */
+ rc = cntfrq_check();
+
+ /* Wait for the CPUs to turn OFF */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ /* Wait for all non lead CPUs to turn OFF before returning */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ /* Wait for the target CPU to turn OFF */
+ while (tftf_psci_affinity_info(cpu_mpid,
+ MPIDR_AFFLVL0) != PSCI_STATE_OFF)
+ ;
+ }
+
+ return rc;
+}
diff --git a/tftf/tests/misc_tests/inject_serror.S b/tftf/tests/misc_tests/inject_serror.S
new file mode 100644
index 0000000..0d7dbf2
--- /dev/null
+++ b/tftf/tests/misc_tests/inject_serror.S
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <sdei.h>
+
+
+#ifdef AARCH64
+ .globl inject_serror
+ .globl inject_uncontainable
+ .globl serror_sdei_event_handler
+
+/*
+ * Program fault injection register, and wait for ever for the fault to trigger.
+ * Note that Trusted Firmware must be compiled for ARMv8.4 along with
+ * FAULT_INJECTION_SUPPORT=1 for this to work. Besides, the model has to be
+ * launched with fault inject support.
+ *
+ * x0: Fault record number to program
+ * x1: Injected fault properties
+ * x2: Type of error to be generated
+ * x3: Memory location to wait for, or 0 if no waiting is required
+ *
+ * Clobbers x0-x2. If x3 != 0, returns only once [x3] reads non-zero.
+ */
+func inject_serror_record
+ /* Select the error record requested in x0 on this PE */
+ msr ERRSELR_EL1, x0
+ isb
+
+ /* Enable error reporting */
+ orr x1, x1, #ERXCTLR_ED_BIT
+ msr ERXCTLR_EL1, x1
+
+ /* Program count down timer to 1 */
+ mov x0, #1
+ msr ERXPFGCDN_EL1, x0
+
+ /* Start count down to generate error */
+ orr x2, x2, #ERXPFGCTL_CDEN_BIT
+ msr ERXPFGCTL_EL1, x2
+ isb
+
+ /* Nothing to wait for: return straight away */
+ cbz x3, 2f
+
+ /* Clear SError received flag */
+ str xzr, [x3, #0]
+ sevl
+
+1:
+ /* Sleep until an event, then re-check the flag at [x3] */
+ wfe
+ ldr x0, [x3, #0]
+ cbz x0, 1b
+
+2:
+ ret
+endfunc inject_serror_record
+
+/*
+ * Inject Unrecoverable error through fault record 0. Wait until serror_received
+ * is set by the SDEI handler in response to receiving the event.
+ */
+func inject_serror
+ /* Inject fault into record 0 */
+ mov x0, #0
+
+ /* Enable error reporting */
+ mov x1, #ERXCTLR_UE_BIT
+ msr ERXCTLR_EL1, x1
+
+ /* Injected fault control: Unrecoverable state (UEU) */
+ mov x2, #ERXPFGCTL_UEU_BIT
+
+ /* Wait address: the C-level serror_received flag */
+ adrp x3, serror_received
+ add x3, x3, :lo12:serror_received
+
+ /* Tail-call: inject_serror_record's ret returns to our caller */
+ b inject_serror_record
+endfunc inject_serror
+
+/*
+ * Inject Uncontainable error through fault record 0. This function doesn't wait
+ * as the handling is terminal in EL3.
+ */
+func inject_uncontainable
+ /* Inject fault into record 0 */
+ mov x0, #0
+
+ /* No extra fault properties */
+ mov x1, xzr
+
+ /* Injected fault control: Uncontainable (UC) */
+ mov x2, #ERXPFGCTL_UC_BIT
+
+ /* Nothing to wait for */
+ mov x3, xzr
+
+ /* Tail-call: inject_serror_record's ret returns to our caller */
+ b inject_serror_record
+endfunc inject_uncontainable
+
+/*
+ * SDEI event handler for SErrors.
+ * Calls the C-level serror_handler(), then signals completion of the SDEI
+ * event via the SDEI_EVENT_COMPLETE SMC. Must not return normally, hence
+ * the trailing endless loop.
+ */
+func serror_sdei_event_handler
+ stp x29, x30, [sp, #-16]!
+ bl serror_handler
+ ldp x29, x30, [sp], #16
+ mov_imm x0, SDEI_EVENT_COMPLETE
+ mov x1, xzr
+ smc #0
+ b .
+endfunc serror_sdei_event_handler
+#endif
diff --git a/tftf/tests/misc_tests/test_single_fault.c b/tftf/tests/misc_tests/test_single_fault.c
new file mode 100644
index 0000000..e652211
--- /dev/null
+++ b/tftf/tests/misc_tests/test_single_fault.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <sdei.h>
+#include <tftf_lib.h>
+
+#ifndef AARCH32
+
+uint64_t serror_received;
+
+extern void inject_serror(void);
+
+/*
+ * C-level SDEI handler, invoked from serror_sdei_event_handler (assembly).
+ * Sets the serror_received flag that inject_serror() polls on.
+ * The 'ev' and 'arg' parameters are unused.
+ */
+int serror_handler(int ev, uint64_t arg)
+{
+ serror_received = 1;
+ tftf_testcase_printf("SError SDEI event received.\n");
+
+ return 0;
+}
+
+extern int serror_sdei_event_handler(int ev, uint64_t arg);
+
+/*
+ * @Test_Aim@ Inject an Unrecoverable SError on the calling CPU and check
+ * that it is delivered to the registered SDEI event handler.
+ *
+ * Relies on inject_serror() (assembly), which blocks until the handler
+ * sets serror_received. Returns TEST_RESULT_FAIL if any SDEI setup step
+ * fails, TEST_RESULT_SUCCESS once the event has been received.
+ */
+test_result_t test_single_fault(void)
+{
+ int64_t ret;
+ const int event_id = 5000;
+
+ /* Register SDEI handler */
+ ret = sdei_event_register(event_id, serror_sdei_event_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret < 0) {
+ /*
+ * Cast to match %llx: int64_t is 'long', not 'long long',
+ * on LP64 AArch64, so passing it uncast is undefined.
+ */
+ tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+ (unsigned long long)ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(event_id);
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event enable failed: 0x%llx\n",
+ (unsigned long long)ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_pe_unmask();
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n",
+ (unsigned long long)ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Blocks until serror_received is set by the SDEI handler */
+ inject_serror();
+
+ return TEST_RESULT_SUCCESS;
+}
+
+#else
+
+/* AArch32 build: RAS fault injection is not supported, skip the test. */
+test_result_t test_single_fault(void)
+{
+ tftf_testcase_printf("Not supported on AArch32.\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+#endif
diff --git a/tftf/tests/misc_tests/test_uncontainable.c b/tftf/tests/misc_tests/test_uncontainable.c
new file mode 100644
index 0000000..79c9031
--- /dev/null
+++ b/tftf/tests/misc_tests/test_uncontainable.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <tftf_lib.h>
+
+#ifndef AARCH32
+
+extern void inject_uncontainable(void);
+
+/*
+ * @Test_Aim@ Inject an Uncontainable error. Handling is terminal in EL3
+ * (see inject_serror.S), so reaching the return statement at all means
+ * the injection call came back; the result is reported as SUCCESS.
+ */
+test_result_t test_uncontainable(void)
+{
+ inject_uncontainable();
+
+ return TEST_RESULT_SUCCESS;
+}
+
+#else
+
+/* AArch32 build: RAS fault injection is not supported, skip the test. */
+test_result_t test_uncontainable(void)
+{
+ tftf_testcase_printf("Not supported on AArch32.\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+#endif
diff --git a/tftf/tests/performance_tests/smc_latencies.c b/tftf/tests/performance_tests/smc_latencies.c
new file mode 100644
index 0000000..fe22679
--- /dev/null
+++ b/tftf/tests/performance_tests/smc_latencies.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file contains tests that measure the round trip latency of an SMC.
+ * The SMC calls used are simple ones (PSCI_VERSION and the Standard Service
+ * UID) that involve almost no handling on the EL3 firmware's side so that we
+ * come close to measuring the overhead of the SMC itself.
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <psci.h>
+#include <smccc.h>
+#include <std_svc.h>
+#include <string.h>
+#include <tftf_lib.h>
+#include <utils_def.h>
+
+#define ITERATIONS_CNT 1000
+static unsigned long long raw_results[ITERATIONS_CNT];
+
+/* Latency information in nano-seconds */
+struct latency_info {
+ unsigned long long min;
+ unsigned long long max;
+ unsigned long long avg;
+};
+
+/*
+ * Convert a count of system counter cycles to nanoseconds, using the
+ * counter frequency from CNTFRQ_EL0.
+ * NOTE(review): 'cycles * 1000000000' can overflow 64 bits for very large
+ * cycle counts; callers here only pass short SMC round-trip deltas.
+ */
+static inline unsigned long long cycles_to_ns(unsigned long long cycles)
+{
+ unsigned long long freq = read_cntfrq_el0();
+ return (cycles * 1000000000) / freq;
+}
+
+/*
+ * Send the given SMC 'ITERATIONS_CNT' times, measure the time it takes to
+ * return back from the SMC call each time, and gather some statistics across
+ * the whole series.
+ *
+ * The statistics consist of:
+ * - minimum latency (i.e the shortest duration across the whole series);
+ * - maximum latency (i.e the longest duration across the whole series);
+ * - average latency.
+ *
+ * These statistics are stored in the latency_info structure whose address
+ * is passed as an argument.
+ *
+ * This function also prints some additional, intermediate information, like the
+ * number of cycles for each SMC and the average number of cycles for an SMC
+ * round trip.
+ */
+static void test_measure_smc_latency(const smc_args *smc_args,
+ struct latency_info *latency)
+{
+ unsigned long long cycles;
+ unsigned long long min_cycles;
+ unsigned long long max_cycles;
+ unsigned long long avg_cycles;
+ unsigned long long cycles_sum = 0;
+
+ min_cycles = UINT64_MAX;
+ max_cycles = 0;
+ memset(raw_results, 0, sizeof(raw_results));
+
+ for (unsigned int i = 0; i < ITERATIONS_CNT; ++i) {
+ /* Time one SMC round trip using the physical counter */
+ cycles = read_cntpct_el0();
+ tftf_smc(smc_args);
+ cycles = read_cntpct_el0() - cycles;
+
+ min_cycles = MIN(min_cycles, cycles);
+ max_cycles = MAX(max_cycles, cycles);
+
+ cycles_sum += cycles;
+
+ raw_results[i] = cycles;
+ }
+
+ avg_cycles = cycles_sum / ITERATIONS_CNT;
+ tftf_testcase_printf("Average number of cycles: %llu\n",
+ (unsigned long long) avg_cycles);
+ latency->min = cycles_to_ns(min_cycles);
+ latency->max = cycles_to_ns(max_cycles);
+ latency->avg = cycles_to_ns(avg_cycles);
+
+ /* Dump every sample (ITERATIONS_CNT lines) at NOTICE verbosity */
+ NOTICE("Raw results:\n");
+ for (unsigned int i = 0; i < ITERATIONS_CNT; ++i) {
+ NOTICE("%llu cycles\t%llu ns\n",
+ raw_results[i], cycles_to_ns(raw_results[i]));
+ }
+}
+
+/*
+ * Measure the latency of the PSCI_VERSION SMC and print the result.
+ * This test always succeeds.
+ */
+test_result_t smc_psci_version_latency(void)
+{
+ struct latency_info latency;
+ smc_args args = { SMC_PSCI_VERSION };
+
+ test_measure_smc_latency(&args, &latency);
+ tftf_testcase_printf(
+ "Average time: %llu ns (ranging from %llu to %llu)\n",
+ (unsigned long long) latency.avg,
+ (unsigned long long) latency.min,
+ (unsigned long long) latency.max);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Measure the latency of the Standard Service Call UID SMC and print the
+ * result.
+ * This test always succeeds.
+ */
+test_result_t smc_std_svc_call_uid_latency(void)
+{
+ struct latency_info latency;
+ smc_args args = { SMC_STD_SVC_UID };
+
+ test_measure_smc_latency(&args, &latency);
+ tftf_testcase_printf(
+ "Average time: %llu ns (ranging from %llu to %llu)\n",
+ (unsigned long long) latency.avg,
+ (unsigned long long) latency.min,
+ (unsigned long long) latency.max);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Measure the latency of the SMCCC_ARCH_WORKAROUND_1 SMC and print the
+ * result. Skipped if SMCCC v1.1 or the workaround itself is not
+ * implemented by the EL3 firmware; otherwise always succeeds.
+ */
+test_result_t smc_arch_workaround_1(void)
+{
+ struct latency_info latency;
+ smc_args args;
+ smc_ret_values ret;
+ int32_t expected_ver;
+
+ /* Check if SMCCC version is at least v1.1 */
+ expected_ver = MAKE_SMCCC_VERSION(1, 1);
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_VERSION;
+ ret = tftf_smc(&args);
+ if ((int32_t)ret.ret0 < expected_ver) {
+ /* Use tftf_testcase_printf for consistency with this file */
+ tftf_testcase_printf("Unexpected SMCCC version: 0x%x\n",
+ (int)ret.ret0);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Check if SMCCC_ARCH_WORKAROUND_1 is implemented */
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_ARCH_FEATURES;
+ args.arg1 = SMCCC_ARCH_WORKAROUND_1;
+ ret = tftf_smc(&args);
+ if ((int)ret.ret0 == -1) {
+ tftf_testcase_printf("SMCCC_ARCH_WORKAROUND_1 is not implemented\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_ARCH_WORKAROUND_1;
+
+ test_measure_smc_latency(&args, &latency);
+ tftf_testcase_printf(
+ "Average time: %llu ns (ranging from %llu to %llu)\n",
+ (unsigned long long) latency.avg,
+ (unsigned long long) latency.min,
+ (unsigned long long) latency.max);
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_1.c b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_1.c
new file mode 100644
index 0000000..4a45ad4
--- /dev/null
+++ b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_1.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smccc.h>
+#include <string.h>
+#include <tftf_lib.h>
+
+#ifdef AARCH64
+#define CORTEX_A57_MIDR 0x410FD070
+#define CORTEX_A72_MIDR 0x410FD080
+#define CORTEX_A73_MIDR 0x410FD090
+#define CORTEX_A75_MIDR 0x410FD0A0
+
+static int cortex_a57_test(void);
+static int csv2_test(void);
+
+/*
+ * Table mapping MIDR values (implementer + part number) to a predicate
+ * returning 1 if SMCCC_ARCH_WORKAROUND_1 is expected to be required on
+ * that CPU, 0 otherwise. Used to cross-check the answer returned by the
+ * EL3 firmware's SMCCC_ARCH_FEATURES call.
+ */
+static struct ent {
+ unsigned int midr;
+ int (*wa_required)(void);
+} entries[] = {
+ { .midr = CORTEX_A57_MIDR, .wa_required = cortex_a57_test },
+ { .midr = CORTEX_A72_MIDR, .wa_required = csv2_test },
+ { .midr = CORTEX_A73_MIDR, .wa_required = csv2_test },
+ { .midr = CORTEX_A75_MIDR, .wa_required = csv2_test },
+};
+
+/* Cortex-A57: the workaround is always expected to be required */
+static int cortex_a57_test(void)
+{
+ return 1;
+}
+
+/*
+ * The workaround is not required on CPUs whose ID_AA64PFR0_EL1.CSV2
+ * field reads 1 (branch targets not hijackable); otherwise it is.
+ */
+static int csv2_test(void)
+{
+ uint64_t pfr0;
+
+ pfr0 = read_id_aa64pfr0_el1() >> ID_AA64PFR0_CSV2_SHIFT;
+ if ((pfr0 & ID_AA64PFR0_CSV2_MASK) == 1)
+ return 0;
+ return 1;
+}
+
+/*
+ * Per-CPU body of the SMCCC_ARCH_WORKAROUND_1 test: queries the firmware
+ * for the workaround's availability and checks the answer against this
+ * CPU's expectation from the 'entries' table, then invokes the workaround
+ * once. Runs on the lead CPU directly and on each secondary as its
+ * power-on entry point.
+ */
+static test_result_t test_smccc_entrypoint(void)
+{
+ smc_args args;
+ smc_ret_values ret;
+ int32_t expected_ver;
+ unsigned int my_midr, midr_mask;
+ int wa_required;
+ size_t i;
+
+ /* Check if SMCCC version is at least v1.1 */
+ expected_ver = MAKE_SMCCC_VERSION(1, 1);
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_VERSION;
+ ret = tftf_smc(&args);
+ if ((int32_t)ret.ret0 < expected_ver) {
+ tftf_testcase_printf("Unexpected SMCCC version: 0x%x\n",
+ (int)ret.ret0);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Check if SMCCC_ARCH_WORKAROUND_1 is required or not */
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_ARCH_FEATURES;
+ args.arg1 = SMCCC_ARCH_WORKAROUND_1;
+ ret = tftf_smc(&args);
+ if ((int)ret.ret0 == -1) {
+ tftf_testcase_printf("SMCCC_ARCH_WORKAROUND_1 is not implemented\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* If the call returns 0, it means the workaround is required */
+ if ((int)ret.ret0 == 0)
+ wa_required = 1;
+ else
+ wa_required = 0;
+
+ /* Check if the SMC return value matches our expectations */
+ my_midr = (unsigned int)read_midr_el1();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ for (i = 0; i < ARRAY_SIZE(entries); i++) {
+ struct ent *entp = &entries[i];
+
+ if ((my_midr & midr_mask) == (entp->midr & midr_mask)) {
+ if (entp->wa_required() != wa_required)
+ return TEST_RESULT_FAIL;
+ break;
+ }
+ }
+ /* CPU not in our table but firmware says workaround needed */
+ if (i == ARRAY_SIZE(entries) && wa_required) {
+ tftf_testcase_printf("TFTF workaround table out of sync with TF\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Invoke the workaround to make sure nothing nasty happens */
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_ARCH_WORKAROUND_1;
+ tftf_smc(&args);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Run the SMCCC_ARCH_WORKAROUND_1 check on every CPU.
+ * Secondaries are powered on one at a time with test_smccc_entrypoint as
+ * entry point and are waited on until they power off again; only the lead
+ * CPU's own result is returned (secondary failures are not aggregated).
+ */
+test_result_t test_smccc_arch_workaround_1(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, ret;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /* Power on all the non-lead cores. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)test_smccc_entrypoint, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ /*
+ * Wait for test_smccc_entrypoint to return
+ * and the CPU to power down
+ */
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ continue;
+ }
+
+ return test_smccc_entrypoint();
+}
+#else
+/* AArch32 build: this test is AArch64-only, skip. */
+test_result_t test_smccc_arch_workaround_1(void)
+{
+ INFO("%s skipped on AArch32\n", __func__);
+ return TEST_RESULT_SKIPPED;
+}
+#endif
diff --git a/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_2.c b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_2.c
new file mode 100644
index 0000000..dd0542c
--- /dev/null
+++ b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_2.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smccc.h>
+#include <string.h>
+#include <tftf_lib.h>
+
+#ifdef AARCH64
+#define CORTEX_A76_MIDR 0x410fd0b0
+
+static int cortex_a76_test(void);
+
+/*
+ * Table mapping MIDR values to a predicate returning 1 if
+ * SMCCC_ARCH_WORKAROUND_2 is expected to be required on that CPU.
+ * Used to cross-check the EL3 firmware's SMCCC_ARCH_FEATURES answer.
+ */
+static struct ent {
+ unsigned int midr;
+ int (*wa_required)(void);
+} entries[] = {
+ { .midr = CORTEX_A76_MIDR, .wa_required = cortex_a76_test },
+};
+
+/* Cortex-A76: the workaround is always expected to be required */
+static int cortex_a76_test(void)
+{
+ return 1;
+}
+
+/*
+ * Per-CPU body of the SMCCC_ARCH_WORKAROUND_2 test: queries the firmware
+ * for the workaround's availability and checks the answer against this
+ * CPU's expectation from the 'entries' table, then invokes the workaround
+ * once. Runs on the lead CPU directly and on each secondary as its
+ * power-on entry point.
+ */
+static test_result_t test_smccc_entrypoint(void)
+{
+ smc_args args;
+ smc_ret_values ret;
+ int32_t expected_ver;
+ unsigned int my_midr, midr_mask;
+ int wa_required;
+ size_t i;
+
+ /* Check if SMCCC version is at least v1.1 */
+ expected_ver = MAKE_SMCCC_VERSION(1, 1);
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_VERSION;
+ ret = tftf_smc(&args);
+ if ((int32_t)ret.ret0 < expected_ver) {
+ tftf_testcase_printf("Unexpected SMCCC version: 0x%x\n",
+ (int)ret.ret0);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Check if SMCCC_ARCH_WORKAROUND_2 is required or not */
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_ARCH_FEATURES;
+ args.arg1 = SMCCC_ARCH_WORKAROUND_2;
+ ret = tftf_smc(&args);
+ if ((int)ret.ret0 == -1) {
+ tftf_testcase_printf("SMCCC_ARCH_WORKAROUND_2 is not implemented\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* If the call returns 0, it means the workaround is required */
+ if ((int)ret.ret0 == 0)
+ wa_required = 1;
+ else
+ wa_required = 0;
+
+ /* Check if the SMC return value matches our expectations */
+ my_midr = (unsigned int)read_midr_el1();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ for (i = 0; i < ARRAY_SIZE(entries); i++) {
+ struct ent *entp = &entries[i];
+
+ if ((my_midr & midr_mask) == (entp->midr & midr_mask)) {
+ if (entp->wa_required() != wa_required)
+ return TEST_RESULT_FAIL;
+ break;
+ }
+ }
+ /* CPU not in our table but firmware says workaround needed */
+ if (i == ARRAY_SIZE(entries) && wa_required) {
+ tftf_testcase_printf("TFTF workaround table out of sync with TF\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Invoke the workaround to make sure nothing nasty happens */
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_ARCH_WORKAROUND_2;
+ tftf_smc(&args);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Run the SMCCC_ARCH_WORKAROUND_2 check on every CPU.
+ * Secondaries are powered on one at a time with test_smccc_entrypoint as
+ * entry point and are waited on until they power off again; only the lead
+ * CPU's own result is returned (secondary failures are not aggregated).
+ */
+test_result_t test_smccc_arch_workaround_2(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, ret;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /* Power on all the non-lead cores. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)test_smccc_entrypoint, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ /*
+ * Wait for test_smccc_entrypoint to return
+ * and the CPU to power down
+ */
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ continue;
+ }
+
+ return test_smccc_entrypoint();
+}
+#else
+/* AArch32 build: this test is AArch64-only, skip. */
+test_result_t test_smccc_arch_workaround_2(void)
+{
+ INFO("%s skipped on AArch32\n", __func__);
+ return TEST_RESULT_SKIPPED;
+}
+#endif
diff --git a/tftf/tests/runtime_services/secure_service/secure_service_helpers.c b/tftf/tests/runtime_services/secure_service/secure_service_helpers.c
new file mode 100644
index 0000000..8675dae
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/secure_service_helpers.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+#include <secure_partition.h>
+#include <string.h>
+
+
+/*
+ * Build a secure-partition service request in the fixed NS<->SP shared
+ * buffer (ARM_SECURE_SERVICE_BUFFER_BASE) and return a pointer to it.
+ * 'data' (of 'data_size' bytes) is copied into the request payload when
+ * data_size is non-zero. The caller must have mapped the shared buffer.
+ */
+secure_partition_request_info_t *create_sps_request(uint32_t id,
+ const void *data,
+ uint64_t data_size)
+{
+ secure_partition_request_info_t *sps_request
+ = (void *) ARM_SECURE_SERVICE_BUFFER_BASE;
+ sps_request->id = id;
+ sps_request->data_size = data_size;
+ if (data_size != 0)
+ memcpy(sps_request->data, data, data_size);
+ return sps_request;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_secure_service_handle.c b/tftf/tests/runtime_services/secure_service/test_secure_service_handle.c
new file mode 100644
index 0000000..ce4dd5c
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_secure_service_handle.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <mm_svc.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <secure_partition.h>
+#include <smccc.h>
+#include <spm_svc.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_v2.h>
+
+static event_t cpu_has_finished_test[PLATFORM_CORE_COUNT];
+
+/*
+ * Test routine for test_secure_partition_secondary_cores_seq().
+ * Sends one MM_COMMUNICATE request (SPS_CHECK_ALIVE) to the secure
+ * partition from the calling CPU, then signals completion through the
+ * per-CPU cpu_has_finished_test event.
+ */
+static test_result_t test_secure_partition_secondary_cores_seq_fn(void)
+{
+ test_result_t result = TEST_RESULT_SUCCESS;
+ u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(cpu_mpid);
+
+ secure_partition_request_info_t *sps_request =
+ create_sps_request(SPS_CHECK_ALIVE, NULL, 0);
+
+ INFO("Sending MM_COMMUNICATE_AARCH64 from CPU %u\n",
+ platform_get_core_pos(read_mpidr_el1() & MPID_MASK));
+
+ smc_args mm_communicate_smc = {
+ MM_COMMUNICATE_AARCH64,
+ 0,
+ (u_register_t) sps_request,
+ 0
+ };
+
+ smc_ret_values smc_ret = tftf_smc(&mm_communicate_smc);
+
+ /* A non-zero return code from the secure partition is a failure */
+ if ((uint32_t)smc_ret.ret0 != 0) {
+ tftf_testcase_printf("Cactus returned: 0x%x\n",
+ (uint32_t)smc_ret.ret0);
+
+ result = TEST_RESULT_FAIL;
+ }
+
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+
+ return result;
+}
+
+/*
+ * @Test_Aim@ This tests that secondary CPUs can access SPM services
+ * sequentially.
+ *
+ * The lead CPU maps the NS<->SP shared buffer, runs the request itself,
+ * then powers on each secondary in turn (waiting for each to finish via
+ * an event) so that only one CPU talks to the secure partition at a time.
+ * Note: a secondary's own FAIL result is not propagated into 'result'.
+ */
+test_result_t test_secure_partition_secondary_cores_seq(void)
+{
+ int psci_ret;
+ u_register_t lead_mpid, cpu_mpid;
+ unsigned int cpu_node, core_pos;
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ SKIP_TEST_IF_MM_VERSION_LESS_THAN(1, 0);
+
+ VERBOSE("Mapping NS<->SP shared buffer\n");
+
+ int rc = mmap_add_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
+ ARM_SECURE_SERVICE_BUFFER_BASE,
+ ARM_SECURE_SERVICE_BUFFER_SIZE,
+ MT_MEMORY | MT_RW | MT_NS);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_add_dynamic_region() = %d\n",
+ __LINE__, rc);
+ result = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ INFO("Lead CPU is CPU %u\n", platform_get_core_pos(lead_mpid));
+
+ /* Exercise the service from the lead CPU first */
+ if (test_secure_partition_secondary_cores_seq_fn() != TEST_RESULT_SUCCESS) {
+ result = TEST_RESULT_FAIL;
+ goto exit_unmap;
+ }
+
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU, we have already tested it */
+ if (cpu_mpid == lead_mpid) {
+ continue;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+
+ tftf_init_event(&cpu_has_finished_test[core_pos]);
+
+ VERBOSE("Powering on CPU %u\n", core_pos);
+
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t)test_secure_partition_secondary_cores_seq_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU %d (rc = %d)\n",
+ core_pos, psci_ret);
+ result = TEST_RESULT_FAIL;
+ goto exit_unmap;
+ }
+
+ /* Sequential: wait for this CPU before starting the next */
+ tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
+ }
+
+exit_unmap:
+ VERBOSE("Unmapping NS<->SP shared buffer\n");
+
+ mmap_remove_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
+ ARM_SECURE_SERVICE_BUFFER_SIZE);
+
+exit:
+ return result;
+}
+
+/******************************************************************************/
+
+static event_t cpu_can_start_test[PLATFORM_CORE_COUNT];
+
+/* Test routine for test_secure_partition_secondary_core() */
+/*
+ * Waits on the per-CPU start event, then hammers the secure partition
+ * with 100 MM_COMMUNICATE requests so that all CPUs issue SMCs
+ * concurrently, and finally signals completion.
+ */
+static test_result_t test_secure_partition_secondary_cores_sim_fn(void)
+{
+ test_result_t result = TEST_RESULT_SUCCESS;
+ u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(cpu_mpid);
+
+ secure_partition_request_info_t *sps_request =
+ create_sps_request(SPS_CHECK_ALIVE, NULL, 0);
+
+ smc_args mm_communicate_smc = {
+ MM_COMMUNICATE_AARCH64,
+ 0,
+ (u_register_t) sps_request,
+ 0
+ };
+
+ /* Barrier: all CPUs start issuing SMCs at roughly the same time */
+ tftf_wait_for_event(&cpu_can_start_test[core_pos]);
+
+ /*
+ * Invoke SMCs for some time to make sure that all CPUs are doing it at
+ * the same time during the test.
+ */
+ for (int i = 0; i < 100; i++) {
+ smc_ret_values smc_ret = tftf_smc(&mm_communicate_smc);
+
+ if ((uint32_t)smc_ret.ret0 != 0) {
+ tftf_testcase_printf("Cactus returned 0x%x at CPU %d\n",
+ (uint32_t)smc_ret.ret0, core_pos);
+ result = TEST_RESULT_FAIL;
+ break;
+ }
+ }
+
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+
+ return result;
+}
+
+/*
+ * @Test_Aim@ This tests that secondary CPUs can access SPM services
+ * simultaneously.
+ *
+ * The lead CPU maps the NS<->SP shared buffer, powers on every secondary
+ * with the _sim_fn routine, releases them all at once through the
+ * cpu_can_start_test events, participates itself, and waits for everyone
+ * to finish before unmapping. Note: a secondary's own FAIL result is not
+ * propagated; only the lead CPU's run determines 'result'.
+ */
+test_result_t test_secure_partition_secondary_cores_sim(void)
+{
+ int psci_ret;
+ u_register_t lead_mpid, cpu_mpid;
+ unsigned int cpu_node, core_pos;
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ SKIP_TEST_IF_MM_VERSION_LESS_THAN(1, 0);
+
+ VERBOSE("Mapping NS<->SP shared buffer\n");
+
+ int rc = mmap_add_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
+ ARM_SECURE_SERVICE_BUFFER_BASE,
+ ARM_SECURE_SERVICE_BUFFER_SIZE,
+ MT_MEMORY | MT_RW | MT_NS);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_add_dynamic_region() = %d\n",
+ __LINE__, rc);
+ result = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ INFO("Lead CPU is CPU %u\n", platform_get_core_pos(lead_mpid));
+
+ /* Reset the start events before any secondary can wait on them */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_init_event(&cpu_can_start_test[core_pos]);
+ }
+
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_mpid) {
+ continue;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+
+ VERBOSE("Powering on CPU %u\n", core_pos);
+
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t)test_secure_partition_secondary_cores_sim_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU %d (rc = %d)\n",
+ core_pos, psci_ret);
+ result = TEST_RESULT_FAIL;
+ goto exit_unmap;
+ }
+ }
+
+ /* Release every CPU (including the lead, which waits in _sim_fn) */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_send_event(&cpu_can_start_test[core_pos]);
+ }
+
+ result = test_secure_partition_secondary_cores_sim_fn();
+
+ /* Wait until all CPUs have finished to unmap the NS<->SP buffer */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
+ }
+exit_unmap:
+ VERBOSE("Unmapping NS<->SP shared buffer\n");
+
+ mmap_remove_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
+ ARM_SECURE_SERVICE_BUFFER_SIZE);
+exit:
+ return result;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_secure_service_interrupts.c b/tftf/tests/runtime_services/secure_service/test_secure_service_interrupts.c
new file mode 100644
index 0000000..50c3df6
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_secure_service_interrupts.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <mm_svc.h>
+#include <secure_partition.h>
+#include <smccc.h>
+#include <spm_svc.h>
+#include <string.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+#include <xlat_tables_v2.h>
+
+static volatile int timer_irq_received;
+
+/*
+ * ISR for the timer interrupt.
+ * Just update a global variable to prove it has been called.
+ *
+ * The assert catches the interrupt firing more than once per run;
+ * timer_irq_received is reset to 0 by the test before arming the timer.
+ * Always returns 0 to the interrupt framework. The 'data' argument is
+ * unused.
+ */
+static int timer_handler(void *data)
+{
+ assert(timer_irq_received == 0);
+ timer_irq_received = 1;
+ return 0;
+}
+
+
+/*
+ * @Test_Aim@ Test that non-secure interrupts do not interrupt secure service
+ * requests.
+ *
+ * 1. Register a handler for the non-secure timer interrupt.
+ *
+ * 2. Program the non-secure timer to fire in 500 ms.
+ *
+ * 3. Make a long-running (> 500 ms) fast secure service request.
+ * This is achieved by requesting the timer sleep service in Cactus
+ * with a 1 second sleep delay.
+ *
+ * 4. While servicing the timer sleep request, the non-secure timer should
+ * fire but not interrupt Cactus.
+ *
+ * 5. Once back in TFTF, check the response from Cactus, which shows whether the
+ * secure service indeed ran to completion.
+ *
+ * 6. Also check whether the pending non-secure timer interrupt successfully got
+ * handled in TFTF.
+ *
+ * Returns TEST_RESULT_SUCCESS only if both the Cactus response and the
+ * pending-interrupt check pass; 'result' stays TEST_RESULT_FAIL otherwise.
+ */
+test_result_t test_secure_partition_interrupt_by_ns(void)
+{
+ secure_partition_request_info_t *sps_request;
+ test_result_t result = TEST_RESULT_FAIL;
+
+ SKIP_TEST_IF_MM_VERSION_LESS_THAN(1, 0);
+
+ VERBOSE("Mapping NS<->SP shared buffer\n");
+
+ /* Identity-map the buffer used to exchange data with the SP. */
+ int rc = mmap_add_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
+ ARM_SECURE_SERVICE_BUFFER_BASE,
+ ARM_SECURE_SERVICE_BUFFER_SIZE,
+ MT_MEMORY | MT_RW | MT_NS);
+ if (rc != 0) {
+ /* Nothing was mapped, so an early return (no unmap) is safe. */
+ tftf_testcase_printf("%d: mmap_add_dynamic_region() = %d\n",
+ __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ timer_irq_received = 0;
+ tftf_timer_register_handler(timer_handler);
+
+ NOTICE("Programming the timer...\n");
+ /* Fire the NS timer in 500 ms, i.e. while Cactus is still sleeping. */
+ rc = tftf_program_timer(500);
+ if (rc < 0) {
+ tftf_testcase_printf("Failed to program timer (%d)\n", rc);
+ goto exit_test;
+ }
+
+ INFO("Sending MM_COMMUNICATE_AARCH64 to Cactus\n");
+
+ /* Ask the Cactus timer sleep service for a 1 second delay. */
+ uint8_t timer_delay = 1;
+ sps_request = create_sps_request(SPS_TIMER_SLEEP,
+ &timer_delay, sizeof(timer_delay));
+ smc_args mm_communicate_smc = {
+ MM_COMMUNICATE_AARCH64,
+ 0, /* cookie, MBZ */
+ (uintptr_t) sps_request,
+ 0
+ };
+
+ smc_ret_values smc_ret = tftf_smc(&mm_communicate_smc);
+
+ INFO("Returned from Cactus, MM_COMMUNICATE_AARCH64 handling complete\n");
+
+ /*
+ * If MM_COMMUNICATE gets interrupted, SPM will return SPM_QUEUED, which
+ * is normally not a valid return value for MM_COMMUNICATE.
+ */
+ if ((uint32_t) smc_ret.ret0 != SPM_SUCCESS) {
+ tftf_testcase_printf("Cactus returned: 0x%x\n",
+ (uint32_t) smc_ret.ret0);
+ goto exit_test;
+ }
+
+ /* Response code is written back by Cactus into the shared buffer. */
+ uint32_t cactus_response;
+ memcpy(&cactus_response, sps_request->data, sizeof(cactus_response));
+ if (cactus_response != CACTUS_FAST_REQUEST_SUCCESS) {
+ tftf_testcase_printf("Error code from the timer secure service: 0x%x\n",
+ cactus_response);
+ goto exit_test;
+ }
+
+ /*
+ * If the timer interrupt is still pending, make sure it is taken right
+ * now.
+ */
+ isb();
+
+ if (timer_irq_received == 1)
+ result = TEST_RESULT_SUCCESS;
+
+exit_test:
+ /* Common cleanup: disarm the timer and drop the handler. */
+ tftf_cancel_timer();
+ tftf_timer_unregister_handler();
+
+ VERBOSE("Unmapping NS<->SP shared buffer\n");
+
+ mmap_remove_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
+ ARM_SECURE_SERVICE_BUFFER_SIZE);
+
+ return result;
+}
diff --git a/tftf/tests/runtime_services/sip_service/test_exec_state_switch.c b/tftf/tests/runtime_services/sip_service/test_exec_state_switch.c
new file mode 100644
index 0000000..120a99e
--- /dev/null
+++ b/tftf/tests/runtime_services/sip_service/test_exec_state_switch.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This test suite validates the execution state switch of a non-secure EL (from
+ * AArch32 to AArch64, and vice versa) by issuing ARM SiP service SMC with
+ * varying parameters. A cookie is shared between both states. A field in the
+ * cookie is updated from the other state to signal that state switch did indeed
+ * happen.
+ *
+ * Note that the suite is not AArch32-ready. All test cases will report as
+ * skipped.
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <tftf_lib.h>
+
+/* Definitions from TF-A arm_sip_svc.h */
+#define ARM_SIP_SVC_VERSION 0x8200ff03
+#define ARM_SIP_SVC_EXE_STATE_SWITCH 0x82000020
+
+/* State switch error codes from SiP service */
+#define STATE_SW_E_PARAM (-2)
+#define STATE_SW_E_DENIED (-3)
+
+#define SIP_VERSION_OK 1
+
+#define HI32(val) (((u_register_t) (val)) >> 32)
+#define LO32(val) ((uint32_t) (u_register_t) (val))
+
+/*
+ * A cookie shared between states for information exchange.
+ *
+ * The field offsets are relied upon by the assembly in
+ * test_exec_state_switch_asm.S, which accesses the cookie with explicit
+ * byte offsets: pc_hi at #0, pc_lo at #4, sp at #8 and success at #16.
+ */
+typedef struct {
+ uint32_t pc_hi;
+ uint32_t pc_lo;
+ uint64_t sp;
+ uint32_t success;
+} state_switch_cookie_t;
+
+/* Non-static: also referenced by name from the assembly file. */
+state_switch_cookie_t state_switch_cookie;
+static event_t secondary_booted __unused;
+
+/*
+ * SiP service version check. Also a signal for test cases to execute or skip
+ * altogether.
+ */
+static int sip_version_check __unused;
+
+/* AArch32 instructions to switch state back to AArch64, stored as data */
+extern void *state_switch_a32_entry;
+
+extern int do_state_switch(void *);
+
+/*
+ * @Test_Aim@ Issue a system reset to initiate state switch SMC call that's part
+ * of ARM SiP service. System reset is required because the state switch SMC
+ * requires that no secondaries have been brought up since booting.
+ *
+ * Side effect: sets sip_version_check to SIP_VERSION_OK when the SiP service
+ * version is >= 0.2, which gates all other test cases in this suite.
+ */
+test_result_t test_exec_state_switch_reset_before(void)
+{
+#ifdef AARCH64
+ int version;
+ smc_args sip_version_smc = { ARM_SIP_SVC_VERSION };
+ smc_args reset = { SMC_PSCI_SYSTEM_RESET };
+ smc_ret_values smc_ret;
+
+#if NEW_TEST_SESSION
+ /*
+ * This tests suite must start with a system reset. Following a reset,
+ * we expect TFTF to proceed with the rest of test cases. With
+ * NEW_TEST_SESSION set when built, TFTF will run this test case again
+ * after reset. Thus we'll continue resetting forever.
+ *
+ * If NEW_TEST_SESSION is set, skip this test case. sip_version_check
+ * won't be set to SIP_VERSION_OK, thereby skipping rest of test cases
+ * as well.
+ */
+ tftf_testcase_printf("This suite needs TFTF built with NEW_TEST_SESSION=0\n");
+ return TEST_RESULT_SKIPPED;
+#endif
+
+ /*
+ * Query system ARM SiP service version. State switch is available since
+ * version 0.2.
+ */
+ smc_ret = tftf_smc(&sip_version_smc);
+ if (((int) smc_ret.ret0) >= 0) {
+ /* Pack version as (major << 8) | minor; 0.2 == 0x02. */
+ version = (smc_ret.ret0 << 8) | (smc_ret.ret1 & 0xff);
+ if (version >= 0x02)
+ sip_version_check = SIP_VERSION_OK;
+ } else {
+ tftf_testcase_printf("Test needs SiP service version 0.2 or later\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /*
+ * This test will be continuously re-entered after reboot, until it
+ * returns success.
+ */
+ if (tftf_is_rebooted())
+ return TEST_RESULT_SUCCESS;
+
+ tftf_testcase_printf("Issuing system reset before state switch\n");
+
+ tftf_notify_reboot();
+ tftf_smc(&reset);
+
+ /* System reset is not expected to return */
+ return TEST_RESULT_FAIL;
+#else
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * @Test_Aim@ Request execution state switch with invalid entry point. Expect
+ * parameter error when switching from AArch64 to AArch32.
+ *
+ * arg1 (PC hi) is deliberately set to an invalid value (-1); the cookie
+ * success flag must remain clear and the SMC must return STATE_SW_E_PARAM.
+ */
+test_result_t test_exec_state_switch_invalid_pc(void)
+{
+#ifdef AARCH64
+ int ret;
+
+ smc_args args = {
+ .arg0 = ARM_SIP_SVC_EXE_STATE_SWITCH,
+ .arg1 = (u_register_t) -1,
+ .arg2 = LO32(&state_switch_a32_entry),
+ .arg3 = HI32(&state_switch_cookie),
+ .arg4 = LO32(&state_switch_cookie)
+ };
+
+ /* Gated on the version probe done by the reset_before test case. */
+ if (sip_version_check != SIP_VERSION_OK)
+ return TEST_RESULT_SKIPPED;
+
+ state_switch_cookie.success = 0;
+ ret = do_state_switch(&args);
+ if (state_switch_cookie.success || (ret != STATE_SW_E_PARAM))
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+#else
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * @Test_Aim@ Request execution state switch with context_hi, and upper part of
+ * context_lo set. Expect failure as they're not supposed to be set when
+ * switching from AArch64 to AArch32.
+ *
+ * arg3 (context hi) is deliberately set to -1; the cookie success flag must
+ * remain clear and the SMC must return STATE_SW_E_PARAM.
+ */
+test_result_t test_exec_state_switch_invalid_ctx(void)
+{
+#ifdef AARCH64
+ int ret;
+
+ smc_args args = {
+ .arg0 = ARM_SIP_SVC_EXE_STATE_SWITCH,
+ .arg1 = HI32(&state_switch_a32_entry),
+ .arg2 = LO32(&state_switch_a32_entry),
+ .arg3 = -1,
+ .arg4 = LO32(&state_switch_cookie)
+ };
+
+ /* Gated on the version probe done by the reset_before test case. */
+ if (sip_version_check != SIP_VERSION_OK)
+ return TEST_RESULT_SKIPPED;
+
+ state_switch_cookie.success = 0;
+ ret = do_state_switch(&args);
+ if (state_switch_cookie.success || (ret != STATE_SW_E_PARAM))
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+#else
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * @Test_Aim@ Perform execution state switch, and back. We don't expect any
+ * failures.
+ *
+ * Success requires both a zero return from the SMC and the success flag set
+ * in the cookie by the AArch32 code, proving the switch actually happened.
+ */
+test_result_t test_exec_state_switch_valid(void)
+{
+#ifdef AARCH64
+ int ret;
+
+ smc_args args = {
+ .arg0 = ARM_SIP_SVC_EXE_STATE_SWITCH,
+ .arg1 = HI32(&state_switch_a32_entry),
+ .arg2 = LO32(&state_switch_a32_entry),
+ .arg3 = HI32(&state_switch_cookie),
+ .arg4 = LO32(&state_switch_cookie)
+ };
+
+ /* Gated on the version probe done by the reset_before test case. */
+ if (sip_version_check != SIP_VERSION_OK)
+ return TEST_RESULT_SKIPPED;
+
+ /* Make sure that we've a 32-bit PC to enter AArch32 */
+ if (HI32(&state_switch_a32_entry)) {
+ tftf_testcase_printf("AArch32 PC wider than 32 bits. Test skipped; needs re-link\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /*
+ * Perform a state switch to 32 and back. Expect success field in the
+ * cookie to be set and return code zero.
+ */
+ state_switch_cookie.success = 0;
+ ret = do_state_switch(&args);
+ if (!state_switch_cookie.success || (ret != 0))
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+#else
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * Entry point for the secondary CPU. Send an event to the caller and returns
+ * immediately.
+ *
+ * Used by test_exec_state_switch_after_cpu_on() purely to mark that a
+ * secondary CPU has been brought online.
+ */
+static inline test_result_t cpu_ping(void)
+{
+#ifdef AARCH64
+ /* Tell the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&secondary_booted);
+
+ /*
+ * When returning from the function, the TFTF framework will power CPUs
+ * down, without this test needing to do anything
+ */
+ return TEST_RESULT_SUCCESS;
+#else
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * @Test_Aim@ Power on any secondary and request a state switch. We expect the
+ * request to be denied because a secondary had been brought up.
+ */
+test_result_t test_exec_state_switch_after_cpu_on(void)
+{
+#ifdef AARCH64
+ u_register_t other_mpidr, my_mpidr;
+ int ret;
+
+ smc_args args = {
+ .arg0 = ARM_SIP_SVC_EXE_STATE_SWITCH,
+ .arg1 = HI32(&state_switch_a32_entry),
+ .arg2 = LO32(&state_switch_a32_entry),
+ .arg3 = HI32(&state_switch_cookie),
+ .arg4 = LO32(&state_switch_cookie)
+ };
+
+ /* Gated on the version probe done by the reset_before test case. */
+ if (sip_version_check != SIP_VERSION_OK)
+ return TEST_RESULT_SKIPPED;
+
+ /* Make sure that we've a 32-bit PC to enter AArch32 */
+ if (HI32(&state_switch_a32_entry)) {
+ tftf_testcase_printf("AArch32 PC wider than 32 bits. Test skipped; needs re-link\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ tftf_init_event(&secondary_booted);
+
+ /* Find a valid CPU to power on */
+ my_mpidr = read_mpidr_el1() & MPID_MASK;
+ other_mpidr = tftf_find_any_cpu_other_than(my_mpidr);
+ if (other_mpidr == INVALID_MPID) {
+ tftf_testcase_printf("Couldn't find a valid other CPU\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Power on the other CPU */
+ ret = tftf_cpu_on(other_mpidr, (uintptr_t) cpu_ping, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ INFO("powering on %llx failed", (unsigned long long)
+ other_mpidr);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait for flag to proceed */
+ tftf_wait_for_event(&secondary_booted);
+
+ /*
+ * Request a state switch to 32 and back. Expect failure since we've
+ * powered a secondary on.
+ */
+ state_switch_cookie.success = 0;
+ ret = do_state_switch(&args);
+ if ((state_switch_cookie.success != 0) || (ret != STATE_SW_E_DENIED))
+ return TEST_RESULT_FAIL;
+ else
+ return TEST_RESULT_SUCCESS;
+#else
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+#endif
+}
diff --git a/tftf/tests/runtime_services/sip_service/test_exec_state_switch_asm.S b/tftf/tests/runtime_services/sip_service/test_exec_state_switch_asm.S
new file mode 100644
index 0000000..577f89f
--- /dev/null
+++ b/tftf/tests/runtime_services/sip_service/test_exec_state_switch_asm.S
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros_common.S>
+
+#define COOKIE_SIZE 20
+
+#ifdef AARCH64
+/*
+ * int do_state_switch(void *)
+ *
+ * In:  x0 = pointer to an smc_args structure holding the 5 arguments of the
+ *           state switch SMC (FID, PC hi/lo, context hi/lo).
+ * Out: x0 = 0 on a successful round trip (via do_switch_back), or the error
+ *           code returned by the SMC on failure.
+ *
+ * Saves all callee-visible GP state and SCTLR on the stack, records the
+ * return PC and stack pointer in state_switch_cookie, cleans the cookie and
+ * saved context to PoC (the other state runs with MMU off), then issues the
+ * state switch SMC.
+ */
+ .globl do_state_switch
+func do_state_switch
+ /* Temporarily save beginning of stack */
+ mov x7, sp
+
+ /*
+ * When asking to switch execution state, we can't expect general
+ * purpose registers hold their values. EL3 might clear them all; even
+ * if EL3 were to preserve them, the register width shrinks and then
+ * expands, leaving the upper part unknown. So save them before and
+ * restore after call to switch.
+ */
+ stp x8, x9, [sp, #-16]!
+ stp x10, x11, [sp, #-16]!
+ stp x12, x13, [sp, #-16]!
+ stp x14, x15, [sp, #-16]!
+ stp x16, x17, [sp, #-16]!
+ stp x18, x19, [sp, #-16]!
+ stp x20, x21, [sp, #-16]!
+ stp x22, x23, [sp, #-16]!
+ stp x24, x25, [sp, #-16]!
+ stp x26, x27, [sp, #-16]!
+ stp x28, x29, [sp, #-16]!
+
+ /*
+ * State switch effectively means a soft reset; so SCTLR will lose its
+ * value too.
+ */
+ mrs x1, CurrentEL
+ cmp x1, #(2 << 2)
+ b.ne 1f
+ mrs x1, sctlr_el2
+ b 2f
+1:
+ mrs x1, sctlr_el1
+2:
+ stp x30, x1, [sp, #-16]!
+
+ /* Store the PC in the cookie when switching back to AArch64 */
+ ldr x4, =state_switch_cookie
+ adr x2, do_switch_back
+ /*
+ * Extract the upper 32 bits of the 64-bit entry point. The previous
+ * sequence ("mov w1, w2; lsr x1, x1, #32") zero-extended w2 into x1
+ * first, so the shift always produced 0 and the "PC hi" field was
+ * never correctly recorded.
+ */
+ lsr x1, x2, #32
+ str w1, [x4, #0] /* PC hi */
+ str w2, [x4, #4] /* PC lo */
+
+ /* Store valid stack pointer in cookie */
+ mov x8, sp
+ str x8, [x4, #8]
+
+ /* Stash stack and LR before calling functions */
+ mov x28, x7
+ mov x29, x30
+
+ mov x10, x0
+
+ /*
+ * Clean and invalidate cookie memory as it's going to be accessed with
+ * MMU off in the new state.
+ */
+ mov x0, x4
+ ldr x1, =COOKIE_SIZE
+ bl flush_dcache_range
+
+ /*
+ * Flush stack context saved on stack as it'll be accessed immediately
+ * after switching back, with MMU off.
+ */
+ mov x0, x8
+ sub x1, x28, x8
+ bl flush_dcache_range
+
+ /* Prepare arguments for state switch SMC */
+ ldr x0, [x10], #8
+ ldr x1, [x10], #8
+ ldr x2, [x10], #8
+ ldr x3, [x10], #8
+ ldr x4, [x10], #8
+
+ /* Switch state */
+ smc #0
+
+ /*
+ * We reach here only if the SMC failed. If so, restore previously
+ * modified callee-saved registers, rewind stack, and return to caller
+ * with the error code from SMC.
+ */
+ mov x1, x28
+ mov x2, x29
+ ldp x28, x29, [sp, #16] /* x28/x29 pair sits just above x30/SCTLR */
+ mov sp, x1
+ ret x2
+
+restore_context:
+ /* Restore context */
+ ldp x30, x1, [sp], #16
+ ldp x28, x29, [sp], #16
+ ldp x26, x27, [sp], #16
+ ldp x24, x25, [sp], #16
+ ldp x22, x23, [sp], #16
+ ldp x20, x21, [sp], #16
+ ldp x18, x19, [sp], #16
+ ldp x16, x17, [sp], #16
+ ldp x14, x15, [sp], #16
+ ldp x12, x13, [sp], #16
+ ldp x10, x11, [sp], #16
+ ldp x8, x9, [sp], #16
+
+ dsb sy
+ mrs x0, CurrentEL
+ cmp x0, #(2 << 2)
+ b.ne 1f
+ msr sctlr_el2, x1
+ b 2f
+1:
+ msr sctlr_el1, x1
+2:
+ isb
+
+ mov x0, #0
+ ret
+endfunc do_state_switch
+
+/*
+ * AArch64 entry point when switching back from AArch32.
+ * Entered with MMU off; w0:w1 carry the hi:lo halves of the cookie address
+ * (the context value passed in the switch-back SMC).
+ */
+do_switch_back:
+ /* w0 and w1 have the cookie */
+ lsl x0, x0, #32
+ orr x0, x1, x0
+
+ /* Reload the stack pointer saved in the cookie (offset 8). */
+ ldr x1, [x0, #8]
+ mov sp, x1
+
+ b restore_context
+
+ .section .data, "aw"
+
+/* AArch32 instructions to switch state back to AArch64, stored as data */
+ .align 2
+ .globl state_switch_a32_entry
+state_switch_a32_entry:
+ /*
+ * Entered in AArch32 with r0:r1 = context hi:lo; when switching from
+ * AArch64, context hi is 0 and r1 holds the 32-bit cookie address.
+ */
+ /* Use the same context when switching back */
+ .word 0xe1a03000 /* mov r3, r0 */
+ .word 0xe1a04001 /* mov r4, r1 */
+
+ /* Set success flag in cookie */
+ .word 0xe3a00001 /* mov r0, #1 */
+ .word 0xe5810010 /* str r0, [r1, #16] */
+
+ /* Setup arguments for SMC */
+ .word 0xe3a00020 /* mov r0, #0x0020 */
+ .word 0xe3480200 /* movt r0, #0x8200 */
+
+ /* r1:r2 = PC hi:lo of the AArch64 return point, read from the cookie */
+ .word 0xe5912004 /* ldr r2, [r1, #4] */
+ .word 0xe5911000 /* ldr r1, [r1, #0] */
+ .word 0xe1600070 /* smc #0x0 */
+ .word 0xeafffffe /* b . */
+
+#else /* !AARCH64 */
+
+/* Not supported on AArch32 yet: always return -1 so callers report failure. */
+func do_state_switch
+ mov r0, #-1
+ bx lr
+endfunc do_state_switch
+
+#endif /* AARCH64 */
diff --git a/tftf/tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c b/tftf/tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c
new file mode 100644
index 0000000..46b5a92
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <pmf.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smccc.h>
+#include <string.h>
+#include <sys/errno.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#define TOTAL_IDS 6
+#define ENTER_PSCI 0
+#define EXIT_PSCI 1
+#define ENTER_HW_LOW_PWR 2
+#define EXIT_HW_LOW_PWR 3
+#define ENTER_CFLUSH 4
+#define EXIT_CFLUSH 5
+
+static spinlock_t cpu_count_lock;
+static volatile int cpu_count;
+static volatile int participating_cpu_count;
+static u_register_t timestamps[PLATFORM_CORE_COUNT][TOTAL_IDS];
+static unsigned int target_pwrlvl;
+
+/*
+ * Helper function to wait for CPUs participating in the test.
+ *
+ * Acts as a synchronisation barrier: each CPU atomically bumps the shared
+ * counter, then busy-waits until all participating_cpu_count CPUs have
+ * arrived. cpu_count and participating_cpu_count are volatile, so the spin
+ * loop re-reads them on each iteration.
+ */
+static void wait_for_participating_cpus(void)
+{
+ assert(participating_cpu_count <= PLATFORM_CORE_COUNT);
+
+ spin_lock(&cpu_count_lock);
+ cpu_count++;
+ spin_unlock(&cpu_count_lock);
+
+ assert(cpu_count <= PLATFORM_CORE_COUNT);
+
+ while (cpu_count != participating_cpu_count)
+ continue;
+}
+
+/*
+ * Perform an SMC call into TF-A to collect timestamp specified by `tid`
+ * and pass it as a parameter back to the caller.
+ *
+ * tid: PMF timestamp identifier (implementation/service/timestamp id).
+ * v:   out-parameter receiving the timestamp value (ret1 of the SMC).
+ * Returns ret0 of the SMC, 0 on success.
+ */
+static u_register_t pmf_get_ts(u_register_t tid, u_register_t *v)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = PMF_SMC_GET_TIMESTAMP;
+ args.arg1 = tid;
+ /* Request the timestamp recorded for the calling CPU. */
+ args.arg2 = read_mpidr_el1();
+ ret = tftf_smc(&args);
+ *v = ret.ret1;
+ return ret.ret0;
+}
+
+/*
+ * Convert a counter cycle count to nanoseconds given the counter frequency.
+ * Returns 0 on success; -ERANGE if the multiply by 10^9 would overflow
+ * 64 bits or if freq is zero. Result is stored in *ns.
+ */
+static int cycles_to_ns(uint64_t cycles, uint64_t freq, uint64_t *ns)
+{
+ if (cycles > UINT64_MAX / 1000000000 || freq == 0)
+ return -ERANGE;
+ *ns = cycles * 1000000000 / freq;
+ return 0;
+}
+
+/*
+ * Return a pointer to the calling CPU's row in the global timestamps array
+ * (one row of TOTAL_IDS entries per core, indexed by core position).
+ */
+static u_register_t *get_core_timestamps(void)
+{
+ unsigned int pos = platform_get_core_pos(read_mpidr_el1());
+
+ assert(pos < PLATFORM_CORE_COUNT);
+ return timestamps[pos];
+}
+
+/*
+ * Check timestamps for the suspend/cpu off tests.
+ *
+ * Validates the expected chronological ordering for the calling CPU:
+ * ENTER_PSCI <= ENTER_HW_LOW_PWR <= EXIT_HW_LOW_PWR <= EXIT_PSCI, and
+ * ENTER_CFLUSH <= EXIT_CFLUSH. Returns TEST_RESULT_FAIL on any violation.
+ */
+static test_result_t check_pwr_down_ts(void)
+{
+ u_register_t *ts;
+
+ ts = get_core_timestamps();
+ if (!(ts[ENTER_PSCI] <= ts[ENTER_HW_LOW_PWR] &&
+ ts[ENTER_HW_LOW_PWR] <= ts[EXIT_HW_LOW_PWR] &&
+ ts[EXIT_HW_LOW_PWR] <= ts[EXIT_PSCI])) {
+ tftf_testcase_printf("PMF timestamps are not correctly ordered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (ts[ENTER_CFLUSH] > ts[EXIT_CFLUSH]) {
+ tftf_testcase_printf("PMF timestamps are not correctly ordered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Capture all runtime instrumentation timestamps for the current
+ * CPU and store them into the timestamps array.
+ *
+ * Iterates over the TOTAL_IDS timestamp ids, building each PMF tid from the
+ * TIF implementation id, the RT_INSTR service id and the index i.
+ */
+static test_result_t get_ts(void)
+{
+ u_register_t tid, *ts;
+ int i;
+
+ ts = get_core_timestamps();
+ for (i = 0; i < TOTAL_IDS; i++) {
+ tid = PMF_ARM_TIF_IMPL_ID << PMF_IMPL_ID_SHIFT;
+ tid |= PMF_RT_INSTR_SVC_ID << PMF_SVC_ID_SHIFT | i;
+ if (pmf_get_ts(tid, &ts[i]) != 0) {
+ ERROR("Failed to capture PMF timestamp\n");
+ return TEST_RESULT_FAIL;
+ }
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Dump suspend statistics for the suspend/cpu off test.
+ *
+ * For every core, converts three cycle deltas to nanoseconds and prints them
+ * in a machine-parsable <RT_INSTR:...> line: time from PSCI entry to hardware
+ * low power, from hardware wakeup to PSCI exit, and cache flush duration.
+ * `msg` tags the output (callers pass __func__). Returns a TEST_RESULT_*
+ * value despite the int return type.
+ */
+static int dump_suspend_stats(const char *msg)
+{
+ u_register_t *ts;
+ u_register_t target_mpid;
+ uint64_t freq, cycles[3], period[3];
+ int cpu_node, ret;
+ unsigned int pos;
+
+ freq = read_cntfrq_el0();
+
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ pos = platform_get_core_pos(target_mpid);
+ assert(pos < PLATFORM_CORE_COUNT);
+ ts = timestamps[pos];
+
+ cycles[0] = ts[ENTER_HW_LOW_PWR] - ts[ENTER_PSCI];
+ ret = cycles_to_ns(cycles[0], freq, &period[0]);
+ if (ret < 0) {
+ ERROR("cycles_to_ns: out of range\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ cycles[1] = ts[EXIT_PSCI] - ts[EXIT_HW_LOW_PWR];
+ ret = cycles_to_ns(cycles[1], freq, &period[1]);
+ if (ret < 0) {
+ ERROR("cycles_to_ns: out of range\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ cycles[2] = ts[EXIT_CFLUSH] - ts[ENTER_CFLUSH];
+ ret = cycles_to_ns(cycles[2], freq, &period[2]);
+ if (ret < 0) {
+ ERROR("cycles_to_ns: out of range\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ printf("<RT_INSTR:%s\t%d\t%02llu\t%02llu\t%02llu/>\n", msg, pos,
+ (unsigned long long)period[0],
+ (unsigned long long)period[1],
+ (unsigned long long)period[2]);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Dump statistics for a PSCI version call.
+ *
+ * For every core, prints the PSCI entry-to-exit latency in nanoseconds in a
+ * machine-parsable <RT_INSTR:...> line. `msg` tags the output (callers pass
+ * __func__). Returns a TEST_RESULT_* value despite the int return type.
+ */
+static int dump_psci_version_stats(const char *msg)
+{
+ u_register_t *ts;
+ u_register_t target_mpid;
+ uint64_t freq, cycles, period;
+ int cpu_node, ret;
+ unsigned int pos;
+
+ freq = read_cntfrq_el0();
+
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ pos = platform_get_core_pos(target_mpid);
+ assert(pos < PLATFORM_CORE_COUNT);
+ ts = timestamps[pos];
+
+ cycles = ts[EXIT_PSCI] - ts[ENTER_PSCI];
+ ret = cycles_to_ns(cycles, freq, &period);
+ if (ret < 0) {
+ ERROR("cycles_to_ns: out of range\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ printf("<RT_INSTR:%s\t%d\t%02llu/>\n", msg, pos,
+ (unsigned long long)period);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Dummy entry point to turn core off for the CPU off test.
+ * Synchronises with the other participants, then simply returns; the TFTF
+ * framework then powers the core down.
+ */
+static test_result_t dummy_entrypoint(void)
+{
+ wait_for_participating_cpus();
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Entrypoint to collect timestamps for CPU off test.
+ * After synchronising, reads the runtime instrumentation timestamps left by
+ * the preceding power-down cycle and validates their ordering.
+ */
+static test_result_t collect_ts_entrypoint(void)
+{
+ wait_for_participating_cpus();
+
+ if (get_ts() != TEST_RESULT_SUCCESS ||
+ check_pwr_down_ts() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Suspend current core to power level specified by `target_pwrlvl`.
+ *
+ * Builds the deepest power state for the requested level, programs a wakeup
+ * timer and issues CPU_SUSPEND; the timer is cancelled after resuming.
+ */
+static test_result_t suspend_current_core(void)
+{
+ unsigned int pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+ unsigned int pwrlvl, susp_type, state_id, power_state;
+ int ret;
+
+ INIT_PWR_LEVEL_INDEX(pstateid_idx);
+
+ /* Pick the deepest power state available at target_pwrlvl. */
+ tftf_set_deepest_pstate_idx(target_pwrlvl, pstateid_idx);
+ tftf_get_pstate_vars(&pwrlvl, &susp_type, &state_id, pstateid_idx);
+
+ power_state = tftf_make_psci_pstate(pwrlvl, susp_type, state_id);
+
+ ret = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ power_state, NULL, NULL);
+ if (ret != 0) {
+ ERROR("Failed to program timer or suspend CPU: 0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ tftf_cancel_timer();
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * This entrypoint is used for all suspend tests.
+ * Synchronises with the other participants, suspends to target_pwrlvl, then
+ * collects and validates the runtime instrumentation timestamps.
+ */
+static test_result_t suspend_core_entrypoint(void)
+{
+ wait_for_participating_cpus();
+
+ if (suspend_current_core() != TEST_RESULT_SUCCESS ||
+ get_ts() != TEST_RESULT_SUCCESS ||
+ check_pwr_down_ts() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Entrypoint used for the PSCI version test.
+ * Synchronises, issues a PSCI_VERSION call, then captures the PMF timestamps
+ * and checks that PSCI entry precedes PSCI exit.
+ */
+static test_result_t psci_version_entrypoint(void)
+{
+ u_register_t *ts;
+ int version;
+
+ wait_for_participating_cpus();
+
+ version = tftf_get_psci_version();
+ if (!tftf_is_valid_psci_version(version)) {
+ tftf_testcase_printf(
+ "Wrong PSCI version:0x%08x\n", version);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (get_ts() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ /* Check timestamp order. */
+ ts = get_core_timestamps();
+ if (ts[ENTER_PSCI] > ts[EXIT_PSCI]) {
+ tftf_testcase_printf("PMF timestamps are not correctly ordered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Check if runtime instrumentation is enabled in TF.
+ * Probes timestamp 0 of the RT_INSTR service; a zero return from
+ * pmf_get_ts() means the service exists. Returns non-zero when supported.
+ */
+static int is_rt_instr_supported(void)
+{
+ u_register_t tid, dummy;
+
+ tid = PMF_ARM_TIF_IMPL_ID << PMF_IMPL_ID_SHIFT;
+ tid |= PMF_RT_INSTR_SVC_ID << PMF_SVC_ID_SHIFT;
+ return !pmf_get_ts(tid, &dummy);
+}
+
+/*
+ * This test powers on all on the non-lead cores and brings
+ * them and the lead core to a common synchronization point.
+ * Then a suspend to the deepest power level supported on the
+ * platform is initiated on all cores in parallel.
+ *
+ * Callers set the file-scope `target_pwrlvl` before invoking this helper.
+ */
+static test_result_t test_rt_instr_susp_parallel(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, ret;
+
+ if (is_rt_instr_supported() == 0)
+ return TEST_RESULT_SKIPPED;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ participating_cpu_count = tftf_get_total_cpus_count();
+ init_spinlock(&cpu_count_lock);
+ cpu_count = 0;
+
+ /* Power on all the non-lead cores. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+ if (lead_mpid == target_mpid)
+ continue;
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)suspend_core_entrypoint, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* The lead core participates in the barrier and suspend as well. */
+ if (suspend_core_entrypoint() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ /* Wait for the non-lead cores to power down. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+ if (lead_mpid == target_mpid)
+ continue;
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ continue;
+ cpu_count--;
+ }
+
+ /* Account for the lead core; the counter must balance out to zero. */
+ cpu_count--;
+ assert(cpu_count == 0);
+
+ return dump_suspend_stats(__func__);
+}
+
+/*
+ * This test powers on each non-lead core in sequence and
+ * suspends it to the deepest power level supported on the platform.
+ * It then waits for the core to power off. Each core in
+ * the non-lead cluster will bring the entire cluster down when it
+ * powers off because it will be the only core active in the cluster.
+ * The lead core will also be suspended in a similar fashion.
+ *
+ * Callers set the file-scope `target_pwrlvl` before invoking this helper.
+ */
+static test_result_t test_rt_instr_susp_serial(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, ret;
+
+ if (is_rt_instr_supported() == 0)
+ return TEST_RESULT_SKIPPED;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ /* Only one core is ever awake at a time, so the barrier count is 1. */
+ participating_cpu_count = 1;
+ init_spinlock(&cpu_count_lock);
+ cpu_count = 0;
+
+ /* Suspend one core at a time. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+ if (lead_mpid == target_mpid)
+ continue;
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)suspend_core_entrypoint, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ continue;
+ cpu_count--;
+ }
+
+ assert(cpu_count == 0);
+
+ /* Suspend lead core as well. */
+ if (suspend_core_entrypoint() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ cpu_count--;
+ assert(cpu_count == 0);
+
+ return dump_suspend_stats(__func__);
+}
+
+/*
+ * @Test_Aim@ CPU suspend to deepest power level on all cores in parallel.
+ *
+ * This test should exercise contention in TF-A as all the cores initiate
+ * a CPU suspend call in parallel.
+ */
+test_result_t test_rt_instr_susp_deep_parallel(void)
+{
+ /* Deepest level: typically system/cluster power down. */
+ target_pwrlvl = PLAT_MAX_PWR_LEVEL;
+ return test_rt_instr_susp_parallel();
+}
+
+/*
+ * @Test_Aim@ CPU suspend on all cores in parallel.
+ *
+ * Suspend all cores in parallel to target power level 0.
+ * Cache associated with power domain level 0 is flushed. For
+ * Juno, the L1 cache is flushed.
+ */
+test_result_t test_rt_instr_cpu_susp_parallel(void)
+{
+ /* Level 0: core-local suspend only. */
+ target_pwrlvl = 0;
+ return test_rt_instr_susp_parallel();
+}
+
+/*
+ * @Test_Aim@ CPU suspend to deepest power level on all cores in sequence.
+ *
+ * Each core in the non-lead cluster brings down the entire cluster when
+ * it goes down.
+ */
+test_result_t test_rt_instr_susp_deep_serial(void)
+{
+ /* Deepest level: typically system/cluster power down. */
+ target_pwrlvl = PLAT_MAX_PWR_LEVEL;
+ return test_rt_instr_susp_serial();
+}
+
+/*
+ * @Test_Aim@ CPU suspend on all cores in sequence.
+ *
+ * Cache associated with level 0 power domain are flushed. For
+ * Juno, the L1 cache is flushed.
+ */
+test_result_t test_rt_instr_cpu_susp_serial(void)
+{
+ /* Level 0: core-local suspend only. */
+ target_pwrlvl = 0;
+ return test_rt_instr_susp_serial();
+}
+
+/*
+ * @Test_Aim@ CPU off on all non-lead cores in sequence and
+ * suspend lead to deepest power level.
+ *
+ * The test sequence is as follows:
+ *
+ * 1) Turn on and turn off each non-lead core in sequence.
+ * 2) Program wake up timer and suspend the lead core to deepest power level.
+ * 3) Turn on each secondary core and get the timestamps from each core.
+ *
+ * All cores in the non-lead cluster bring the cluster
+ * down when they go down. Core 4 brings the big cluster down
+ * when it goes down.
+ */
+test_result_t test_rt_instr_cpu_off_serial(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, ret;
+
+ if (is_rt_instr_supported() == 0)
+ return TEST_RESULT_SKIPPED;
+
+ target_pwrlvl = PLAT_MAX_PWR_LEVEL;
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ /* Only one core is ever awake at a time, so the barrier count is 1. */
+ participating_cpu_count = 1;
+ init_spinlock(&cpu_count_lock);
+ cpu_count = 0;
+
+ /* Turn core on/off one at a time. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)dummy_entrypoint, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ continue;
+ cpu_count--;
+ }
+
+ assert(cpu_count == 0);
+
+ /* Suspend lead core as well. */
+ if (suspend_core_entrypoint() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ cpu_count--;
+ assert(cpu_count == 0);
+
+ /* Turn core on one at a time and collect timestamps. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)collect_ts_entrypoint, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ continue;
+ cpu_count--;
+ }
+
+ assert(cpu_count == 0);
+
+ return dump_suspend_stats(__func__);
+}
+
+/*
+ * @Test_Aim@ PSCI version call on all cores in parallel.
+ *
+ * All cores synchronise, issue PSCI_VERSION concurrently and validate the
+ * PMF entry/exit timestamps before per-core latencies are printed.
+ */
+test_result_t test_rt_instr_psci_version_parallel(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, ret;
+
+ if (is_rt_instr_supported() == 0)
+ return TEST_RESULT_SKIPPED;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ participating_cpu_count = tftf_get_total_cpus_count();
+ init_spinlock(&cpu_count_lock);
+ cpu_count = 0;
+
+ /* Power on all the non-lead cores. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)psci_version_entrypoint, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* The lead core runs the same entrypoint. */
+ if (psci_version_entrypoint() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ /* Wait for the non-lead cores to power down. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ continue;
+ cpu_count--;
+ }
+
+ /* Account for the lead core; the counter must balance out to zero. */
+ cpu_count--;
+ assert(cpu_count == 0);
+
+ return dump_psci_version_stats(__func__);
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/affinity_info/test_psci_affinity_info.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/affinity_info/test_psci_affinity_info.c
new file mode 100644
index 0000000..5abb7f0
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/affinity_info/test_psci_affinity_info.c
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <events.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/* Special value used to terminate the array of expected return values */
+#define END_OF_EXPECTED_VALUE 0xDEADBEEF
+/*
+ * Event used by test test_affinity_info_level0_powerdown() to synchronise
+ * CPUs
+ */
+static event_t cpu_about_to_suspend;
+static unsigned int psci_version;
+
+/*
+ * `expected_values` should contain an array of expected return values
+ * terminated by `END_OF_EXPECTED_VALUE`. If 'actual_value' exists in
+ * one of 'expected_values' then return a test success.
+ * Otherwise, print an error message in the test report and report a test
+ * failure.
+ */
+/*
+ * Check 'actual_value' against the END_OF_EXPECTED_VALUE-terminated array
+ * 'expected_values'. Return TEST_RESULT_SUCCESS on a match; otherwise log
+ * the actual value and the full list of accepted values, and return
+ * TEST_RESULT_FAIL.
+ */
+static test_result_t get_test_result(const int *expected_values, int actual_value)
+{
+	const int *val;
+
+	/* Any match with an expected value is a pass. */
+	for (val = expected_values; *val != END_OF_EXPECTED_VALUE; val++) {
+		if (*val == actual_value)
+			return TEST_RESULT_SUCCESS;
+	}
+
+	/* No match: report what we got and what would have been accepted. */
+	tftf_testcase_printf("Unexpected return value: %i Expected values are:",
+		actual_value);
+	for (val = expected_values; *val != END_OF_EXPECTED_VALUE; val++)
+		tftf_testcase_printf("%i ", *val);
+	tftf_testcase_printf("\n");
+
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 0 on online CPU
+ *
+ * Call PSCI AFFINITY_INFO targeted at affinity level 0 on lead CPU.
+ * Expect the PSCI implementation to report that the affinity instance is on.
+ */
+test_result_t test_affinity_info_level0_on(void)
+{
+	unsigned int self_mpid = read_mpidr_el1() & MPID_MASK;
+	int expected_values[] = {PSCI_STATE_ON, END_OF_EXPECTED_VALUE};
+
+	/* A running CPU must report itself as ON at affinity level 0. */
+	return get_test_result(expected_values,
+			tftf_psci_affinity_info(self_mpid, MPIDR_AFFLVL0));
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 0 on offline CPU
+ *
+ * Call PSCI AFFINITY_INFO targeted at affinity level 0 on all non-lead CPUs.
+ * Expect the PSCI implementation to report that the affinity instances are off.
+ *
+ * This test needs 2 CPUs to run. It will be skipped on a single core platform.
+ */
+test_result_t test_affinity_info_level0_off(void)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int mpid, node;
+	test_result_t result = TEST_RESULT_SUCCESS;
+	int expected_values[] = {PSCI_STATE_OFF, END_OF_EXPECTED_VALUE};
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	/* Every CPU except the (running) lead one must be reported OFF. */
+	for_each_cpu(node) {
+		mpid = tftf_get_mpidr_from_node(node);
+		/* Skip lead CPU, as it is powered on */
+		if (mpid == lead_mpid)
+			continue;
+
+		if (get_test_result(expected_values,
+				tftf_psci_affinity_info(mpid, MPIDR_AFFLVL0))
+				== TEST_RESULT_FAIL) {
+			/* Keep iterating so every failure is reported. */
+			result = TEST_RESULT_FAIL;
+		}
+	}
+
+	return result;
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 1 on online cluster
+ *
+ * Call PSCI AFFINITY_INFO targeted at affinity level 1 on the lead cluster
+ * (i.e. the cluster to which the lead CPU belongs to).
+ * PSCI implementation prior to PSCI 1.0 needs to report that the cluster is on
+ * and others can also return INVALID_PARAMETERS.
+ */
+test_result_t test_affinity_info_level1_on(void)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int target_mpid;
+	int32_t aff_info;
+	int expected_values[3];
+
+	/*
+	 * Minimum version of PSCI is 0.2, uses this info to decide if
+	 * tftf_get_psci_version() needs to be called or not.
+	 * The result is cached in the file-scope 'psci_version' variable.
+	 */
+	if (!psci_version)
+		psci_version = tftf_get_psci_version();
+
+	/*
+	 * From PSCI version 1.0 onwards, Trusted Firmware-A may or may not
+	 * track affinity levels greater than zero.
+	 */
+	if (!(psci_version & PSCI_MAJOR_VER_MASK)) {
+		/* Pre-1.0: the lead cluster must be reported as ON. */
+		expected_values[0] = PSCI_STATE_ON;
+		expected_values[1] = END_OF_EXPECTED_VALUE;
+	} else {
+		/* 1.0+: INVALID_PARAMETERS is also an acceptable answer. */
+		expected_values[0] = PSCI_STATE_ON;
+		expected_values[1] = PSCI_E_INVALID_PARAMS;
+		expected_values[2] = END_OF_EXPECTED_VALUE;
+	}
+
+	/*
+	 * Build an MPID corresponding to the lead cluster. Set the affinity
+	 * level0 bits to some arbitrary value that doesn't correspond to any
+	 * CPU on the platform. The PSCI implementation should ignore the
+	 * affinity 0 field.
+	 */
+	target_mpid = (lead_mpid & MPIDR_CLUSTER_MASK) | 0xE1;
+	aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL1);
+	return get_test_result(expected_values, aff_info);
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 1 on offline cluster
+ *
+ * Call PSCI AFFINITY_INFO targeted at affinity level 1 on a non-lead cluster
+ * (i.e. another cluster than the one to which the lead CPU belongs to).
+ * PSCI implementation prior to PSCI 1.0 needs to report that the cluster is OFF
+ * and others can also return INVALID_PARAMETERS.
+ *
+ * This test needs 2 clusters to run. It will be skipped on a single cluster
+ * platform.
+ */
+test_result_t test_affinity_info_level1_off(void)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int target_mpid;
+	int32_t aff_info;
+	unsigned int cluster_id;
+	int expected_values[3];
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	/* Find the first cluster that is not the lead CPU's cluster. */
+	for (cluster_id = 0;
+	     cluster_id < tftf_get_total_clusters_count();
+	     ++cluster_id) {
+		if (cluster_id != MPIDR_CLUSTER_ID(lead_mpid))
+			break;
+	}
+	/* The SKIP above guarantees at least 2 clusters, so one must differ. */
+	assert(cluster_id != tftf_get_total_clusters_count());
+
+
+	/*
+	 * Minimum version of PSCI is 0.2, uses this info to decide if
+	 * tftf_get_psci_version() needs to be called or not.
+	 */
+	if (!psci_version)
+		psci_version = tftf_get_psci_version();
+
+	/*
+	 * From PSCI version 1.0 onwards, Trusted Firmware-A may or may not
+	 * track affinity levels greater than zero.
+	 */
+	if (!(psci_version & PSCI_MAJOR_VER_MASK)) {
+		/* Pre-1.0: the non-lead cluster must be reported as OFF. */
+		expected_values[0] = PSCI_STATE_OFF;
+		expected_values[1] = END_OF_EXPECTED_VALUE;
+	} else {
+		/* 1.0+: INVALID_PARAMETERS is also an acceptable answer. */
+		expected_values[0] = PSCI_STATE_OFF;
+		expected_values[1] = PSCI_E_INVALID_PARAMS;
+		expected_values[2] = END_OF_EXPECTED_VALUE;
+	}
+
+	/*
+	 * Build an MPID corresponding to a non-lead cluster. Set the affinity
+	 * level0 bits to some arbitrary value that doesn't correspond to any
+	 * CPU on the platform. The PSCI implementation should ignore the
+	 * affinity 0 field.
+	 */
+	target_mpid = make_mpid(cluster_id, 0xE1);
+	aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL1);
+	return get_test_result(expected_values, aff_info);
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 2
+ *
+ * For PSCI implementations prior to PSCI 1.0, the expected return value
+ * depends on the maximum affinity level that the power management
+ * operations can apply to on this platform.
+ * - If the platform doesn't have an affinity level 2 then expect the PSCI
+ * implementation to report that it received invalid parameters.
+ * - If affinity level 2 exists then expect the PSCI implementation to report
+ * that the affinity instance is on.
+ *
+ * From PSCI 1.0 onwards, it can also return INVALID_PARAMETERS.
+ */
+test_result_t test_affinity_info_level2(void)
+{
+	int expected_values[3];
+	unsigned int target_mpid;
+	int32_t aff_info;
+
+	/*
+	 * Minimum version of PSCI is 0.2, uses this info to decide if
+	 * tftf_get_psci_version() needs to be called or not.
+	 */
+	if (!psci_version)
+		psci_version = tftf_get_psci_version();
+
+	/* ON is only expected if the platform actually has a level 2. */
+	expected_values[0] = (PLATFORM_MAX_AFFLVL >= 2)
+		? PSCI_STATE_ON
+		: PSCI_E_INVALID_PARAMS;
+
+	/*
+	 * From PSCI version 1.0 onwards, Trusted Firmware-A may or may not
+	 * track affinity levels greater than zero.
+	 */
+	if (!(psci_version & PSCI_MAJOR_VER_MASK)) {
+		expected_values[1] = END_OF_EXPECTED_VALUE;
+	} else {
+		/* 1.0+: INVALID_PARAMETERS is also an acceptable answer. */
+		expected_values[1] = PSCI_E_INVALID_PARAMS;
+		expected_values[2] = END_OF_EXPECTED_VALUE;
+	}
+
+	/*
+	 * Build an MPID corresponding to the lead affinity instance at level 2.
+	 * Set the affinity level0 & level1 bits to some arbitrary values that
+	 * don't correspond to any affinity instance on the platform. The PSCI
+	 * implementation should ignore the affinity 0 & 1 fields.
+	 */
+	target_mpid = read_mpidr_el1() & (MPIDR_AFFLVL_MASK << MPIDR_AFF_SHIFT(2));
+	target_mpid |= 0xAB << MPIDR_AFF1_SHIFT;
+	target_mpid |= 0xE1 << MPIDR_AFF0_SHIFT;
+
+	aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL2);
+	return get_test_result(expected_values, aff_info);
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 3
+ *
+ * For PSCI implementations prior to PSCI 1.0, the expected return value
+ * depends on the maximum affinity level that the power management
+ * operations can apply to on this platform.
+ * - If the platform doesn't have an affinity level 3 then expect the PSCI
+ * implementation to report that it received invalid parameters.
+ * - If affinity level 3 exists then expect the PSCI implementation to report
+ * that the affinity instance is on.
+ *
+ * From PSCI 1.0 onwards, it can also return INVALID_PARAMETERS.
+ */
+test_result_t test_affinity_info_level3(void)
+{
+/* Affinity level 3 only exists in the 64-bit MPIDR layout. */
+#ifndef AARCH32
+	int expected_values[3];
+	uint64_t target_mpid;
+	int32_t aff_info;
+
+	/*
+	 * Minimum version of PSCI is 0.2, uses this info to decide if
+	 * tftf_get_psci_version() needs to be called or not.
+	 */
+	if (!psci_version)
+		psci_version = tftf_get_psci_version();
+
+	/* ON is only expected if the platform actually has a level 3. */
+	expected_values[0] = (PLATFORM_MAX_AFFLVL == 3)
+		? PSCI_STATE_ON
+		: PSCI_E_INVALID_PARAMS;
+
+	/*
+	 * From PSCI version 1.0 onwards, Trusted Firmware-A may or may not
+	 * track affinity levels greater than zero.
+	 */
+	if (!(psci_version & PSCI_MAJOR_VER_MASK)) {
+		expected_values[1] = END_OF_EXPECTED_VALUE;
+	} else {
+		/* 1.0+: INVALID_PARAMETERS is also an acceptable answer. */
+		expected_values[1] = PSCI_E_INVALID_PARAMS;
+		expected_values[2] = END_OF_EXPECTED_VALUE;
+	}
+
+	/*
+	 * Build an MPID corresponding to the lead affinity instance at level 3.
+	 * Set the affinity level0/level1/level2 bits to some arbitrary values
+	 * that don't correspond to any affinity instance on the platform. The
+	 * PSCI implementation should ignore the affinity 0, 1 & 2 fields.
+	 */
+	target_mpid = read_mpidr_el1();
+	target_mpid &= ((uint64_t) MPIDR_AFFLVL_MASK) << MPIDR_AFF_SHIFT(3);
+	target_mpid |= 0xD2 << MPIDR_AFF2_SHIFT;
+	target_mpid |= 0xAB << MPIDR_AFF1_SHIFT;
+	target_mpid |= 0xE1 << MPIDR_AFF0_SHIFT;
+
+	aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL3);
+	return get_test_result(expected_values, aff_info);
+#else
+	return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * Suspend to powerdown the calling CPU.
+ *
+ * 1) Enable SGI #0. This SGI will be sent by the lead CPU to wake this CPU.
+ * 2) Suspend the CPU.
+ * 3) Report success/failure of the suspend operation.
+ */
+static test_result_t suspend_to_powerdown(void)
+{
+	uint32_t power_state, stateid;
+	int psci_ret, expected_return_val;
+
+	/*
+	 * Enable reception of SGI 0 on the calling CPU.
+	 * SGI 0 will serve as the wake-up event to come out of suspend.
+	 */
+	tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+	expected_return_val = tftf_psci_make_composite_state_id(
+		PSTATE_AFF_LVL_0, PSTATE_TYPE_POWERDOWN, &stateid);
+
+	/* Need at least 1 power down state defined at level 0 */
+	if (expected_return_val != PSCI_E_SUCCESS)
+		return TEST_RESULT_SKIPPED;
+
+	/*
+	 * Suspend the calling CPU to the desired affinity level and power state
+	 */
+	power_state = tftf_make_psci_pstate(PSTATE_AFF_LVL_0,
+					    PSTATE_TYPE_POWERDOWN,
+					    stateid);
+
+	/*
+	 * Notify the lead CPU that the calling CPU is about to suspend itself
+	 */
+	tftf_send_event(&cpu_about_to_suspend);
+
+	/* Blocks until woken by SGI 0, sent by the lead CPU. */
+	psci_ret = tftf_cpu_suspend(power_state);
+
+	tftf_irq_disable(IRQ_NS_SGI_0);
+
+	if (psci_ret != PSCI_E_SUCCESS) {
+		tftf_testcase_printf("Failed to suspend (%i)\n", psci_ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 0 on a
+ * suspended CPU
+ *
+ * A CPU that has been physically powered down as a result of a call to
+ * CPU_SUSPEND must be reported as ON by the AFFINITY_INFO call. This test
+ * aims at verifying this behaviour.
+ *
+ * This test needs 2 CPUs to run. It will be skipped on a single core platform.
+ * It will also be skipped if an error is encountered during the bring-up of the
+ * non-lead CPU.
+ */
+test_result_t test_affinity_info_level0_powerdown(void)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int target_mpid, target_core_pos;
+	int psci_ret;
+	int32_t aff_info;
+	test_result_t ret;
+	/* A suspended CPU must still be reported as ON, not OFF. */
+	int expected_values[] = {PSCI_STATE_ON, END_OF_EXPECTED_VALUE};
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	/*
+	 * Preparation step:
+	 * Find another CPU than the lead CPU and power it on.
+	 */
+	target_mpid = tftf_find_any_cpu_other_than(lead_mpid);
+	assert(target_mpid != INVALID_MPID);
+	target_core_pos = platform_get_core_pos(target_mpid);
+
+	psci_ret = tftf_cpu_on(target_mpid, (uintptr_t) suspend_to_powerdown, 0);
+	if (psci_ret != PSCI_E_SUCCESS) {
+		tftf_testcase_printf("Failed to power on CPU 0x%x (%d)\n",
+				     target_mpid, psci_ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Wait for the other CPU to initiate the suspend operation */
+	tftf_wait_for_event(&cpu_about_to_suspend);
+
+	/* Wait a bit for the CPU to really enter suspend state */
+	waitms(PLAT_SUSPEND_ENTRY_TIME);
+
+	/* Request status of the non-lead CPU while it is suspended */
+	aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0);
+	ret = get_test_result(expected_values, aff_info);
+
+	/* Wake up non-lead CPU */
+	tftf_send_sgi(IRQ_NS_SGI_0, target_core_pos);
+
+	return ret;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug.c
new file mode 100644
index 0000000..73ac951
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+
+/* Entry point for secondary CPUs: announce arrival to the lead CPU. */
+static test_result_t test_cpu_booted(void)
+{
+	unsigned int mpidr_val = read_mpidr_el1() & MPID_MASK;
+	unsigned int pos = platform_get_core_pos(mpidr_val);
+
+	VERBOSE("Hello from core 0x%x\n", mpidr_val);
+
+	/* Tell the lead CPU that the calling CPU has entered the test */
+	tftf_send_event(&cpu_booted[pos]);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+
+/*
+ * @Test_Aim@ Test CPU hotplug support
+ *
+ * This test powers on all CPUs using the PSCI CPU_ON API and checks whether the
+ * operation succeeded.
+ */
+test_result_t test_psci_cpu_hotplug(void)
+{
+	test_result_t ret = TEST_RESULT_SUCCESS;
+	unsigned int cpu_node, cpu_mpid;
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos;
+	int psci_ret;
+
+	/* Power on all CPUs */
+	for_each_cpu(cpu_node) {
+
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU, it is already powered on */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		psci_ret = tftf_cpu_on(cpu_mpid,
+				       (uintptr_t) test_cpu_booted,
+				       0);
+		/* Any single CPU_ON failure fails the whole test. */
+		if (psci_ret != PSCI_E_SUCCESS)
+			ret = TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * The lead CPU needs to wait for all other CPUs to enter the test.
+	 * This is because the test framework declares the end of a test when no
+	 * CPU is in the test. Therefore, if the lead CPU goes ahead and exits
+	 * the test then potentially there could be no CPU executing the test at
+	 * this time because none of them have entered the test yet, hence the
+	 * framework will be misled in thinking the test is finished.
+	 */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_booted[core_pos]);
+	}
+
+	return ret;
+}
+
+/* Verify context ID passed by lead CPU */
+/* Check that the context ID chosen by the lead CPU reached this CPU intact. */
+static test_result_t test_context_ids_non_lead_cpu(void)
+{
+	unsigned int mpidr_val = read_mpidr_el1() & MPID_MASK;
+	unsigned int pos = platform_get_core_pos(mpidr_val);
+	u_register_t expected_ctx = (u_register_t)(mpidr_val + pos);
+
+	/* Signal to the lead CPU that the calling CPU has entered the test */
+	tftf_send_event(&cpu_booted[pos]);
+
+	if (tftf_get_cpu_on_ctx_id(pos) != expected_ctx) {
+		tftf_testcase_printf("Failed to get context ID in CPU %d\n",
+				     pos);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Verify the value of context ID from tftf_cpu_on().
+ *
+ * This test powers on all the secondary CPUs and sends different context IDs
+ * when doing so. All CPUs must receive the correct value without it having
+ * been overwritten during the boot process.
+ */
+test_result_t test_context_ids(void)
+{
+	int i;
+	unsigned int lead_mpid;
+	unsigned int cpu_mpid, cpu_node;
+	unsigned int core_pos;
+	int psci_ret;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	/* Re-arm the shared per-CPU events before reuse. */
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+		tftf_init_event(&cpu_booted[i]);
+
+	/* Power on all CPUs */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already powered on */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+
+		/* Pass as context ID something that the CPU can verify */
+		psci_ret = tftf_cpu_on(cpu_mpid,
+				(uintptr_t) test_context_ids_non_lead_cpu,
+				(u_register_t)(cpu_mpid + core_pos));
+
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf(
+				"Failed to power on CPU 0x%x (%d)\n",
+				cpu_mpid, psci_ret);
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for non-lead CPUs to enter the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_booted[core_pos]);
+	}
+
+	/* The per-CPU pass/fail verdicts are reported by the secondaries. */
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug_invalid.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug_invalid.c
new file mode 100644
index 0000000..8d8758b
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug_invalid.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file implements test cases exercising invalid scenarios of the CPU
+ * hotplug API. It checks that the PSCI implementation responds as per the
+ * PSCI specification.
+ */
+
+#include <arch_helpers.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <tftf_lib.h>
+
+/*
+ * Event data structures used by non-lead CPUs to tell the lead CPU they entered
+ * the test.
+ */
+static event_t entered_test[PLATFORM_CORE_COUNT];
+
+/*
+ * If 'real_value' == 'expected_value' then return a test success.
+ * Otherwise, print an error message in the test report and report a test
+ * failure.
+ */
+/*
+ * Compare an actual return value against the expected one.
+ * Log a message and fail the test on mismatch; succeed otherwise.
+ */
+static test_result_t report_result(int expected_value, int real_value)
+{
+	if (real_value == expected_value)
+		return TEST_RESULT_SUCCESS;
+
+	tftf_testcase_printf(
+		"Wrong return value, expected %i, got %i\n",
+		expected_value, real_value);
+
+	return TEST_RESULT_FAIL;
+}
+
+/* Re-issue CPU_ON on the calling (already online) CPU; expect ALREADY_ON. */
+static test_result_t reissue_cpu_hotplug(void)
+{
+	unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(mpid);
+	int psci_ret;
+
+	tftf_send_event(&entered_test[core_pos]);
+
+	/*
+	 * This time, we can't use tftf_cpu_on() to issue the power on request
+	 * because this would go through too much test framework logic. E.g. the
+	 * framework would figure out that the CPU is already powered on by
+	 * looking at the CPU state information it keeps, hence it would report
+	 * an error.
+	 *
+	 * Here we need to bypass the framework and issue the SMC call directly
+	 * from the test case itself. tftf_psci_cpu_on() is a simple wrapper
+	 * over the SMC call.
+	 *
+	 * Entry point address argument can be any valid address.
+	 */
+	psci_ret = tftf_psci_cpu_on(mpid, (uintptr_t)reissue_cpu_hotplug, 0);
+
+	return report_result(PSCI_E_ALREADY_ON, psci_ret);
+}
+
+/*
+ * @Test_Aim@ Hotplug request on a CPU which is already powered on
+ *
+ * 1) Power on all CPUs.
+ * 2) Each CPU re-issues the PSCI CPU_ON request on itself. This is expected to
+ * fail and the PSCI implementation is expected to report that CPUs are
+ * already powered on.
+ *
+ * The test is skipped if an error is encountered during the bring-up of
+ * non-lead CPUs.
+ */
+test_result_t test_psci_cpu_hotplug_plugged(void)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int cpu_mpid, cpu_node;
+	int psci_ret;
+	unsigned int core_pos;
+
+	/* Power on all CPUs */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already powered on */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t) reissue_cpu_hotplug, 0);
+		if (psci_ret != PSCI_E_SUCCESS)
+			return TEST_RESULT_SKIPPED;
+
+		/* Wait for the CPU to enter the test */
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&entered_test[core_pos]);
+	}
+
+	/* Finally run the same ALREADY_ON check on the lead CPU itself. */
+	return reissue_cpu_hotplug();
+}
+
+/*
+ * @Test_Aim@ Hotplug request on a CPU that doesn't exist
+ *
+ * Such a hotplug request is expected to fail and the PSCI implementation is
+ * expected to report that the parameters are invalid.
+ */
+test_result_t test_psci_cpu_hotplug_invalid_cpu(void)
+{
+	int ret;
+
+	/*
+	 * 0xFFFFFFFF is an invalid MPID.
+	 * Pass a valid entry point address to make sure that the call does not
+	 * fail for the wrong reason.
+	 */
+	ret = tftf_psci_cpu_on(0xFFFFFFFF,
+			(uintptr_t) test_psci_cpu_hotplug_invalid_cpu, 0);
+
+	/* The only acceptable answer is INVALID_PARAMETERS. */
+	return report_result(PSCI_E_INVALID_PARAMS, ret);
+}
+
+/*
+ * @Test_Aim@ Hotplug request on a CPU with invalid entrypoint address
+ *
+ * Such a hotplug request is expected to fail and the PSCI implementation is
+ * expected to report that the entry point is an invalid address for PSCI 1.0
+ * onwards.
+ */
+test_result_t test_psci_cpu_hotplug_invalid_ep(void)
+{
+	int psci_ret;
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int cpu_mpid, cpu_node;
+	unsigned int psci_version;
+
+	psci_version = tftf_get_psci_version();
+
+	/*
+	 * PSCI_E_INVALID_ADDRESS is only defined from PSCI 1.0 onwards, so
+	 * the test is meaningless on older implementations.
+	 * (Fixed the skip message: "less then" -> "less than".)
+	 */
+	if (!(psci_version & PSCI_MAJOR_VER_MASK)) {
+		tftf_testcase_printf(
+			"PSCI version is less than 1.0\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Power on all CPUs */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already powered on */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		/*
+		 * Here we need to bypass the framework and issue the SMC call
+		 * directly from the test case itself as tftf_cpu_on calls SMC
+		 * calls with hotplug as entry point. tftf_psci_cpu_on() is a
+		 * simple wrapper over the SMC call.
+		 *
+		 * Entry point address argument can be any invalid address.
+		 */
+
+		psci_ret = tftf_psci_cpu_on(cpu_mpid, 0, 0);
+		if (psci_ret != PSCI_E_INVALID_ADDRESS) {
+			tftf_testcase_printf("CPU:0x%x Expected: %i Actual: %i\n",
+					     cpu_mpid,
+					     PSCI_E_INVALID_ADDRESS,
+					     psci_ret);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c
new file mode 100644
index 0000000..9e9998c
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <events.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+/*
+ * Desired affinity level and state type (standby or powerdown) for the next
+ * CPU_SUSPEND operation. We need these shared variables because there is no way
+ * to pass arguments to non-lead CPUs...
+ */
+static unsigned int test_aff_level;
+static unsigned int test_suspend_type;
+
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+
+/*
+ * Variable used by the non-lead CPUs to tell the lead CPU they
+ * were woken up by IRQ_WAKE_SGI
+ */
+static event_t event_received_wake_irq[PLATFORM_CORE_COUNT];
+
+/* Variable used to confirm the CPU is woken up by IRQ_WAKE_SGI or Timer IRQ */
+static volatile int requested_irq_received[PLATFORM_CORE_COUNT];
+
+/* IRQ handler marking that the calling CPU received its wake-up interrupt. */
+static int requested_irq_handler(void *data)
+{
+	unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+#if ENABLE_ASSERTIONS
+	/* 'data' points to the ID of the IRQ being handled. */
+	unsigned int irq_id = *(unsigned int *) data;
+#endif
+
+	/* Only the wake SGI or the system timer IRQ is expected here. */
+	assert(irq_id == IRQ_WAKE_SGI || irq_id == tftf_get_timer_irq());
+	/* Each CPU must receive its wake-up interrupt at most once. */
+	assert(requested_irq_received[core_pos] == 0);
+
+	requested_irq_received[core_pos] = 1;
+
+	return 0;
+}
+
+/*
+ * Suspend the calling (non-lead) CPU.
+ * 1) Program a wake-up event to come out of suspend state
+ * 2) Suspend the CPU to the desired affinity level and power state (standby or
+ * powerdown)
+ * 3) Report success/failure of the suspend operation
+ */
+static test_result_t suspend_non_lead_cpu(void)
+{
+	unsigned int mpid = read_mpidr_el1();
+	unsigned int core_pos = platform_get_core_pos(mpid);
+	uint32_t power_state, stateid;
+	int rc, expected_return_val;
+	u_register_t flags;
+
+	tftf_timer_register_handler(requested_irq_handler);
+
+	/* Tell the lead CPU that the calling CPU is about to suspend itself */
+	tftf_send_event(&cpu_ready[core_pos]);
+
+	/* IRQs need to be disabled prior to programming the timer */
+	/* Preserve DAIF flags*/
+	flags = read_daif();
+	disable_irq();
+
+	rc = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME);
+	if (rc != 0) {
+		/* Restore previous DAIF flags */
+		write_daif(flags);
+		isb();
+		ERROR("Timer programming failed with error %d\n", rc);
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * NOTE(review): the CPU_SUSPEND return code is expected to match the
+	 * state-ID builder's result (both succeed, or both report the same
+	 * error for an unsupported state) — confirm against the framework.
+	 */
+	expected_return_val = tftf_psci_make_composite_state_id(test_aff_level,
+			test_suspend_type, &stateid);
+
+	/*
+	 * Suspend the calling CPU to the desired affinity level and power state
+	 */
+	power_state = tftf_make_psci_pstate(test_aff_level,
+					    test_suspend_type,
+					    stateid);
+	rc = tftf_cpu_suspend(power_state);
+
+	/* Restore previous DAIF flags */
+	write_daif(flags);
+	isb();
+
+	/* Wait until the IRQ wake interrupt is received */
+	while (!requested_irq_received[core_pos])
+		;
+
+	tftf_send_event(&event_received_wake_irq[core_pos]);
+	tftf_timer_unregister_handler();
+
+	if (rc == expected_return_val)
+		return TEST_RESULT_SUCCESS;
+
+	tftf_testcase_printf("Wrong value: expected %i, got %i\n",
+			     expected_return_val, rc);
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * CPU suspend test to the desired affinity level and power state
+ *
+ * 1) Power on all cores
+ * 2) Each core registers a wake-up event to come out of suspend state
+ * 3) Each core tries to enter suspend state
+ *
+ * The test is skipped if an error occurs during the bring-up of non-lead CPUs.
+ */
+static test_result_t test_psci_suspend(unsigned int aff_level,
+				       unsigned int suspend_type)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int target_mpid, target_node;
+	unsigned int core_pos;
+	uint32_t power_state, stateid;
+	int rc, expected_return_val;
+	u_register_t flags;
+
+	/* Nothing to test beyond the platform's deepest affinity level. */
+	if (aff_level > MPIDR_MAX_AFFLVL)
+		return TEST_RESULT_SKIPPED;
+
+	assert((suspend_type == PSTATE_TYPE_POWERDOWN) ||
+	       (suspend_type == PSTATE_TYPE_STANDBY));
+
+	/* Export these variables for the non-lead CPUs */
+	test_aff_level = aff_level;
+	test_suspend_type = suspend_type;
+
+	/*
+	 * All testcases in this file use the same cpu_ready[] array so it needs
+	 * to be re-initialised each time.
+	 */
+	for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i) {
+		tftf_init_event(&cpu_ready[i]);
+		tftf_init_event(&event_received_wake_irq[i]);
+		requested_irq_received[i] = 0;
+	}
+	/* Ensure the above writes are seen before any read */
+	dmbsy();
+
+	/*
+	 * Preparation step: Power on all cores.
+	 */
+	for_each_cpu(target_node) {
+		target_mpid = tftf_get_mpidr_from_node(target_node);
+		/* Skip lead CPU as it is already on */
+		if (target_mpid == lead_mpid)
+			continue;
+
+		rc = tftf_cpu_on(target_mpid,
+				 (uintptr_t) suspend_non_lead_cpu,
+				 0);
+		if (rc != PSCI_E_SUCCESS) {
+			tftf_testcase_printf(
+				"Failed to power on CPU 0x%x (%d)\n",
+				target_mpid, rc);
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for all non-lead CPUs to be ready */
+	for_each_cpu(target_node) {
+		target_mpid = tftf_get_mpidr_from_node(target_node);
+		/* Skip lead CPU */
+		if (target_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(target_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+	}
+
+	/* IRQs need to be disabled prior to programming the timer */
+	/* Preserve DAIF flags*/
+	flags = read_daif();
+	disable_irq();
+
+	/*
+	 * Program the timer, this will serve as the
+	 * wake-up event to come out of suspend state.
+	 */
+	rc = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME);
+	if (rc) {
+		/* Restore previous DAIF flags */
+		write_daif(flags);
+		isb();
+		ERROR("Timer programming failed with error %d\n", rc);
+		return TEST_RESULT_FAIL;
+	}
+
+	expected_return_val = tftf_psci_make_composite_state_id(test_aff_level,
+			test_suspend_type, &stateid);
+
+	/*
+	 * Suspend the calling CPU to the desired affinity level and power state
+	 */
+	power_state = tftf_make_psci_pstate(test_aff_level,
+					    test_suspend_type,
+					    stateid);
+	/* Deeper-than-cluster suspends also require system context saving. */
+	if (test_aff_level >= PSTATE_AFF_LVL_2)
+		rc = tftf_cpu_suspend_save_sys_ctx(power_state);
+	else
+		rc = tftf_cpu_suspend(power_state);
+
+	/* Restore previous DAIF flags */
+	write_daif(flags);
+	isb();
+
+	/*
+	 * Cancel the timer set up by lead CPU in case we have returned early
+	 * due to invalid parameters or it will lead to spurious wake-up later.
+	 */
+	tftf_cancel_timer();
+
+	/*
+	 * Wait for all non-lead CPUs to receive IRQ_WAKE_SGI. This will also
+	 * ensure that the lead CPU has received the system timer IRQ
+	 * because SGI #IRQ_WAKE_SGI is sent only after that.
+	 */
+	for_each_cpu(target_node) {
+		target_mpid = tftf_get_mpidr_from_node(target_node);
+		/* Skip lead CPU */
+		if (target_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(target_mpid);
+		tftf_wait_for_event(&event_received_wake_irq[core_pos]);
+	}
+
+	if (rc == expected_return_val)
+		return TEST_RESULT_SUCCESS;
+
+	tftf_testcase_printf("Wrong value: expected %i, got %i\n",
+			     expected_return_val, rc);
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * The wrappers below exercise test_psci_suspend() for each combination of
+ * target affinity level (0 to 3) and suspend type (powerdown or standby).
+ */
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 0
+ */
+test_result_t test_psci_suspend_powerdown_level0(void)
+{
+	return test_psci_suspend(PSTATE_AFF_LVL_0, PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 0
+ */
+test_result_t test_psci_suspend_standby_level0(void)
+{
+	return test_psci_suspend(PSTATE_AFF_LVL_0, PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 1
+ */
+test_result_t test_psci_suspend_powerdown_level1(void)
+{
+	return test_psci_suspend(PSTATE_AFF_LVL_1, PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 1
+ */
+test_result_t test_psci_suspend_standby_level1(void)
+{
+	return test_psci_suspend(PSTATE_AFF_LVL_1, PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 2
+ */
+test_result_t test_psci_suspend_powerdown_level2(void)
+{
+	return test_psci_suspend(PSTATE_AFF_LVL_2, PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 2
+ */
+test_result_t test_psci_suspend_standby_level2(void)
+{
+	return test_psci_suspend(PSTATE_AFF_LVL_2, PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 3
+ */
+test_result_t test_psci_suspend_powerdown_level3(void)
+{
+	return test_psci_suspend(PSTATE_AFF_LVL_3, PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 3
+ */
+test_result_t test_psci_suspend_standby_level3(void)
+{
+	return test_psci_suspend(PSTATE_AFF_LVL_3, PSTATE_TYPE_STANDBY);
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect/test_mem_protect.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect/test_mem_protect.c
new file mode 100644
index 0000000..35c26e8
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect/test_mem_protect.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <psci.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_v2.h>
+
+#define SENTINEL 0x55
+/* MEM_PROTECT enable states; the psci_mem_protect() calls below both take
+ * and return these values */
+#define MEM_PROT_ENABLED 1
+#define MEM_PROT_DISABLED 0
+/*
+ * Test to verify that mem_protect is executed in next boot after calling
+ * the PSCI mem_protect function
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : when after rebooting mem_protect is activated
+ * and the sentinel is detected to have been reset.
+ * TEST_RESULT_FAIL : when some of the calls to mem_protect fails or
+ * sentinel is not cleared after resetting.
+ */
+static test_result_t test_mem_protect_helper(void *arg)
+{
+ int ret;
+ unsigned char value;
+ unsigned char *sentinel = arg; /* byte expected to be wiped by mem_protect */
+
+ assert(sentinel != NULL);
+
+ /* Post-reboot path: verify the sentinel was cleared across the reset */
+ if (tftf_is_rebooted()) {
+ value = *sentinel;
+ if (value != 0 && value != SENTINEL) {
+ tftf_testcase_printf("Sentinel address modified out of mem_protect:%d\n",
+ value);
+ return TEST_RESULT_FAIL;
+ }
+ if (value == SENTINEL) {
+ tftf_testcase_printf("Sentinel address not cleared by mem_protect\n");
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+ }
+
+ ret = psci_mem_protect(MEM_PROT_DISABLED);
+ if (ret != MEM_PROT_ENABLED && ret != MEM_PROT_DISABLED) {
+ /* Report through tftf_testcase_printf() for consistency with
+ * the other failure paths in this function (was INFO). */
+ tftf_testcase_printf("Mem_protect failed %d\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* mem_protect mechanism should be disabled at this point */
+ ret = psci_mem_protect(MEM_PROT_ENABLED);
+ if (ret != MEM_PROT_DISABLED) {
+ tftf_testcase_printf("Mem_protect failed %d\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* mem_protect mechanism should be enabled at this point */
+ ret = psci_mem_protect(MEM_PROT_ENABLED);
+ if (ret != MEM_PROT_ENABLED) {
+ tftf_testcase_printf("Mem_protect failed %d\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ *sentinel = SENTINEL;
+
+ /* Notify that we are rebooting now. */
+ tftf_notify_reboot();
+
+ psci_system_reset();
+ /*
+ * psci_reset shouldn't return
+ */
+ return TEST_RESULT_FAIL;
+}
+
+test_result_t test_mem_protect(void)
+{
+ map_args_unmap_t args;
+ unsigned char *sentinel;
+ int ret;
+
+ /* Skip if the firmware does not implement MEM_PROTECT */
+ ret = tftf_get_psci_feature_info(SMC_PSCI_MEM_PROTECT);
+ if (ret == PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("Mem_protect is not supported %d\n", ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ sentinel = psci_mem_prot_get_sentinel();
+ if (sentinel == NULL) {
+ tftf_testcase_printf("Could not find a suitable address for the sentinel.\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Map the whole page containing the sentinel byte as RW data, then
+ * run the helper with that mapping in place */
+ args.addr = (uintptr_t) sentinel & ~PAGE_SIZE_MASK;
+ args.size = PAGE_SIZE;
+ args.attr = MT_RW_DATA;
+ args.arg = sentinel;
+
+ return map_test_unmap(&args, test_mem_protect_helper);
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect_check/mem_protect_check.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect_check/mem_protect_check.c
new file mode 100644
index 0000000..68d15f7
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect_check/mem_protect_check.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+#include <psci.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/*
+ * Query MEM_PROTECT_CHECK_RANGE for [addr, addr + size) and compare the
+ * result against the expected PSCI return code.
+ * Returns 1 on match, 0 (after logging) on mismatch.
+ */
+static test_result_t mem_prot_check(uintptr_t addr, size_t size, int expected)
+{
+ int ret;
+
+ ret = psci_mem_protect_check(addr, size);
+ if (ret != expected) {
+ /* Message typo fixed: "MEM_PROTEC_CHECK" -> "MEM_PROTECT_CHECK" */
+ tftf_testcase_printf("MEM_PROTECT_CHECK failed in (%llx,%llx)\n",
+ (unsigned long long) addr,
+ (unsigned long long) size);
+ return 0;
+ }
+ return 1;
+}
+
+/* Exercise MEM_PROTECT_CHECK_RANGE at the boundaries of one protected
+ * region. Returns 1 if all checks report the expected code, 0 otherwise. */
+static test_result_t test_region(const mem_region_t *region)
+{
+ uintptr_t max_addr = region->addr + region->size;
+
+ /* Zero-length and overflowing ranges must be denied */
+ if (!mem_prot_check(region->addr, 0, PSCI_E_DENIED))
+ return 0;
+ if (!mem_prot_check(region->addr, SIZE_MAX, PSCI_E_DENIED))
+ return 0;
+ if (!mem_prot_check(region->addr, 1, PSCI_E_SUCCESS))
+ return 0;
+ /* NOTE(review): exact duplicate of the previous check — possibly a
+ * different size (e.g. region->size) was intended. Confirm intent. */
+ if (!mem_prot_check(region->addr, 1, PSCI_E_SUCCESS))
+ return 0;
+ if (!mem_prot_check(region->addr, region->size - 1, PSCI_E_SUCCESS))
+ return 0;
+ if (!mem_prot_check(max_addr - 1, 1, PSCI_E_SUCCESS))
+ return 0;
+ return 1;
+}
+
+/*
+ * Test to verify that mem_protect_check_range returns correct answer
+ * for known memory locations.
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : When all the checks return the expected value
+ * TEST_RESULT_FAIL : when some check fails or return an unexpected value
+ */
+test_result_t test_mem_protect_check(void)
+{
+ int ret, nregions;
+ const mem_region_t *regions;
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_MEM_PROTECT_CHECK);
+ if (ret == PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("MEM_PROTECT_CHECK is not supported\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ regions = plat_get_prot_regions(&nregions);
+ if (nregions <= 0) {
+ tftf_testcase_printf("Platform doesn't define testcases for MEM_PROTECT_CHECK\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Global negative cases: degenerate/overflowing ranges must be denied */
+ if (!mem_prot_check(UINTPTR_MAX, 1, PSCI_E_DENIED))
+ return TEST_RESULT_FAIL;
+
+ if (!mem_prot_check(1, SIZE_MAX, PSCI_E_DENIED))
+ return TEST_RESULT_FAIL;
+
+ if (!mem_prot_check(UINTPTR_MAX, 0, PSCI_E_DENIED))
+ return TEST_RESULT_FAIL;
+
+ if (!mem_prot_check(0, 1, PSCI_E_DENIED))
+ return TEST_RESULT_FAIL;
+
+ /* Per-region boundary checks for every platform-declared region */
+ while (nregions-- > 0) {
+ if (!test_region(regions++))
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/migrate_info_type/test_migrate_info_type.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/migrate_info_type/test_migrate_info_type.c
new file mode 100644
index 0000000..73a1048
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/migrate_info_type/test_migrate_info_type.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <psci.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+#include <trusted_os.h>
+#include <tsp.h>
+#include <uuid_utils.h>
+
+/*
+ * @Test_Aim@ Exercise PSCI MIGRATE_INFO_TYPE API
+ *
+ * This test exercises the PSCI MIGRATE_INFO_TYPE API in the following 2
+ * scenarios:
+ *
+ * == No Trusted OS is present ==
+ * In this case,
+ * a) either the EL3 firmware doesn't implement the MIGRATE_INFO_TYPE call
+ * b) or the MIGRATE_INFO_TYPE call should report that the Trusted OS is
+ * not present.
+ * In both cases, the MIGRATE call should not be supported.
+ *
+ * == A Trusted OS is present and it is the TSP ==
+ * In this case, the MIGRATE_INFO_TYPE call should report that the TSP is
+ * MP-capable and hence the MIGRATE call should not be supported.
+ *
+ * This test doesn't support any other Trusted OS than the TSP. It will be
+ * skipped for any other TOS.
+ */
+test_result_t test_migrate_info_type(void)
+{
+ uuid_t tos_uuid;
+ char tos_uuid_str[UUID_STR_SIZE];
+ smc_args args;
+ smc_ret_values ret;
+ int32_t mp_support;
+ int32_t migrate_ret;
+
+ /* Identify the level of multicore support present in the Trusted OS */
+ args.arg0 = SMC_PSCI_MIG_INFO_TYPE;
+ ret = tftf_smc(&args);
+ /* Cast to signed: PSCI error codes are negative */
+ mp_support = (int32_t) ret.ret0;
+
+ if (is_trusted_os_present(&tos_uuid)) {
+ /* The only Trusted OS that this test supports is the TSP */
+ if (!uuid_equal(&tos_uuid, &tsp_uuid)) {
+ tftf_testcase_printf("Trusted OS is not the TSP, "
+ "its UUID is: %s\n",
+ uuid_to_str(&tos_uuid, tos_uuid_str));
+ return TEST_RESULT_SKIPPED;
+ }
+
+ INFO("TSP detected\n");
+
+ if (mp_support != TSP_MIGRATE_INFO) {
+ tftf_testcase_printf(
+ "Wrong return value for MIGRATE_INFO_TYPE: "
+ "expected %i, got %i\n",
+ TSP_MIGRATE_INFO, mp_support);
+ return TEST_RESULT_FAIL;
+ }
+ } else {
+ INFO("No Trusted OS detected\n");
+
+ /* Either answer is spec-compliant when no TOS is present */
+ if ((mp_support != PSCI_E_NOT_SUPPORTED) &&
+ (mp_support != PSCI_TOS_NOT_PRESENT_MP)) {
+ tftf_testcase_printf(
+ "Wrong return value for MIGRATE_INFO_TYPE: "
+ "expected %i or %i, got %i\n",
+ PSCI_E_NOT_SUPPORTED,
+ PSCI_TOS_NOT_PRESENT_MP,
+ mp_support);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /*
+ * Either there is no Trusted OS or the Trusted OS is the TSP.
+ * In both cases, the MIGRATE call should not be supported.
+ */
+ args.arg0 = SMC_PSCI_MIG;
+ /*
+ * Pass a valid MPID so that the MIGRATE call doesn't fail because of
+ * invalid parameters
+ */
+ args.arg1 = read_mpidr_el1() & MPID_MASK;
+ ret = tftf_smc(&args);
+ migrate_ret = (int32_t) ret.ret0;
+
+ if (migrate_ret != PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("Wrong return value for MIGRATE: "
+ "expected %i, got %i\n",
+ PSCI_E_NOT_SUPPORTED, migrate_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_features/test_psci_features.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_features/test_psci_features.c
new file mode 100644
index 0000000..c7b1702
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_features/test_psci_features.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <psci.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Check the list of PSCI functions for PSCI support
+ *
+ * Call PSCI_FEATURES for each PSCI function ID.
+ * - If a PSCI function is mandatory (as per the PSCI specification) then check
+ * the validity of the return flags.
+ * - If a PSCI function is optional (as per the PSCI specification) and
+ * implemented, check the validity of the feature flags.
+ */
+test_result_t test_psci_features(void)
+{
+ test_result_t result = TEST_RESULT_SUCCESS;
+ int32_t ret_flag;
+ const psci_function_t *psci_fn;
+
+ /* Query PSCI_FEATURES for every function in the static table */
+ for (unsigned int i = 0; i < PSCI_NUM_CALLS; i++) {
+ psci_fn = &psci_functions[i];
+
+ ret_flag = tftf_get_psci_feature_info(psci_fn->id);
+
+ if (!psci_fn->mandatory) {
+ /*
+ * If the PSCI function is optional then the PSCI
+ * implementation is allowed to not implement it.
+ */
+ if (ret_flag == PSCI_E_NOT_SUPPORTED)
+ continue;
+
+ INFO("%s non-mandatory function is SUPPORTED\n",
+ psci_fn->str);
+ } else {
+ /* Check mandatory PSCI call is supported */
+ if (ret_flag == PSCI_E_NOT_SUPPORTED) {
+ result = TEST_RESULT_FAIL;
+ tftf_testcase_printf(
+ "%s mandatory function is NOT SUPPORTED\n",
+ psci_fn->str);
+ continue;
+ }
+ }
+
+ /* Check the feature flags for CPU_SUSPEND PSCI calls */
+ if ((psci_fn->id == SMC_PSCI_CPU_SUSPEND_AARCH32) ||
+ (psci_fn->id == SMC_PSCI_CPU_SUSPEND_AARCH64)) {
+ if ((ret_flag & ~CPU_SUSPEND_FEAT_VALID_MASK) != 0) {
+ result = TEST_RESULT_FAIL;
+ tftf_testcase_printf(
+ "Invalid feature flags for CPU_SUSPEND: 0x%x\n",
+ ret_flag);
+ }
+ } else {
+ /* Check the feature flags for other PSCI calls */
+ if (ret_flag != PSCI_FEATURE_FLAGS_ZERO) {
+ result = TEST_RESULT_FAIL;
+ /* Fixed misplaced "\n" inside the message
+ * (was "for %s\n, expected ...") */
+ tftf_testcase_printf(
+ "Wrong feature flags for %s, "
+ "expected 0x%08x, got 0x%08x\n",
+ psci_fn->str,
+ PSCI_FEATURE_FLAGS_ZERO, ret_flag);
+ }
+ }
+ }
+
+ return result;
+}
+
+/*
+ * @Test_Aim@ Check invalid PSCI function ids (Negative Test).
+ */
+test_result_t test_psci_features_invalid_id(void)
+{
+ /* Invalid function ids for negative testing */
+ uint32_t invalid_psci_func = 0xc400a011;
+ /*
+ * Signed type: PSCI error codes such as PSCI_E_NOT_SUPPORTED are
+ * negative (matches the int32_t usage in test_psci_features()).
+ */
+ int32_t ret_flag = tftf_get_psci_feature_info(invalid_psci_func);
+
+ if (ret_flag == PSCI_E_NOT_SUPPORTED)
+ return TEST_RESULT_SUCCESS;
+
+ tftf_testcase_printf("ERROR: Invalid PSCI function is SUPPORTED\n");
+
+ return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c
new file mode 100644
index 0000000..d39cf5b
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/* Invoke _func and verify that its return value is TEST_RESULT_SUCCESS;
+ * on failure, log and propagate the result to the enclosing test */
+#define TEST_FUNC(_func) { \
+ int ret = (_func)(); \
+ if (ret != TEST_RESULT_SUCCESS) { \
+ INFO("test_node_hw_state: function " #_func " failed!\n"); \
+ return ret; \
+ } \
+}
+
+/* Enable messages for debugging purposes */
+#if 0
+# define DBGMSG(...) INFO(__VA_ARGS__)
+#else
+# define DBGMSG(...)
+#endif
+
+/* One above the platform maximum is guaranteed to be an invalid level */
+#define INVALID_POWER_LEVEL (PLAT_MAX_PWR_LEVEL + 1)
+
+/* Per-CPU handshake events between the lead CPU and secondaries */
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+static event_t cpu_continue[PLATFORM_CORE_COUNT];
+
+/* MPIDRs for CPU belonging to both own and foreign clusters */
+static u_register_t native_peer = INVALID_MPID;
+static u_register_t foreign_peer = INVALID_MPID;
+
+/* Secondary-CPU entry point: signal arrival, wait for release, return */
+static test_result_t cpu_ping(void)
+{
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Tell the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ /* Wait for flag to proceed */
+ tftf_wait_for_event(&cpu_continue[core_pos]);
+
+ /*
+ * When returning from the function, the TFTF framework will power CPUs
+ * down, without this test needing to do anything
+ */
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Helper function to detect support for PSCI NODE_HW_STATE.
+ * Returns 1 when the firmware implements the call, 0 otherwise. */
+static int is_psci_node_hw_state_supported(void)
+{
+ return (tftf_get_psci_feature_info(SMC_PSCI_CPU_HW_STATE64) ==
+ PSCI_E_NOT_SUPPORTED) ? 0 : 1;
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE for the current CPU and make sure it returns
+ * PSCI_HW_STATE_ON
+ */
+static test_result_t test_self_cpu(void)
+{
+ /* power_level 0 targets the calling CPU itself */
+ if (tftf_psci_node_hw_state(read_mpidr_el1(), 0) != PSCI_HW_STATE_ON) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE for the current cluster and make sure it
+ * returns PSCI_HW_STATE_ON
+ */
+static test_result_t test_self_cluster(void)
+{
+ /* power_level 1 targets the cluster containing the calling CPU */
+ if (tftf_psci_node_hw_state(read_mpidr_el1(), 1) != PSCI_HW_STATE_ON) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE for a foreign CPU that's currently off. Make
+ * sure it returns PSCI_HW_STATE_OFF
+ */
+static test_result_t test_offline_cpu(void)
+{
+ /* foreign_peer is initialized by test_psci_node_hw_state_multi() */
+ assert(foreign_peer != INVALID_MPID);
+ if (tftf_psci_node_hw_state(foreign_peer, 0) != PSCI_HW_STATE_OFF) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE for a cluster that's currently off. Make sure
+ * it returns PSCI_HW_STATE_OFF
+ */
+static test_result_t test_offline_cluster(void)
+{
+ /* foreign_peer is initialized by test_psci_node_hw_state_multi() */
+ assert(foreign_peer != INVALID_MPID);
+ if (tftf_psci_node_hw_state(foreign_peer, 1) != PSCI_HW_STATE_OFF) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE with an invalid MPIDR. Make sure it returns
+ * invalid parameters
+ */
+static test_result_t test_invalid_mpidr(void)
+{
+ if (tftf_psci_node_hw_state(INVALID_MPID, 0) != PSCI_E_INVALID_PARAMS) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE with an invalid power_level. Make sure it
+ * returns invalid parameters
+ */
+static test_result_t test_invalid_power_level(void)
+{
+ /* INVALID_POWER_LEVEL is one above the platform maximum */
+ if (tftf_psci_node_hw_state(read_mpidr_el1(), INVALID_POWER_LEVEL) !=
+ PSCI_E_INVALID_PARAMS) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE on all powered-down CPUs on the system. Verify
+ * that the state was PSCI_HW_STATE_OFF before, but is PSCI_HW_STATE_ON
+ * afterwards
+ */
+static test_result_t test_online_all(void)
+{
+ int cpu_node, pos, state, ret, i;
+ u_register_t mpidr, my_mpidr;
+
+ /* Initialize all events */
+ for (i = 0; i < ARRAY_SIZE(cpu_booted); i++)
+ tftf_init_event(&cpu_booted[i]);
+ for (i = 0; i < ARRAY_SIZE(cpu_continue); i++)
+ tftf_init_event(&cpu_continue[i]);
+
+ DBGMSG("%s: powering cores on...\n", __func__);
+ my_mpidr = read_mpidr_el1() & MPID_MASK;
+ DBGMSG("%s: my mpidr: %llx\n", __func__,
+ (unsigned long long) my_mpidr);
+ /* Phase 1: check OFF -> power on -> check ON, one CPU at a time */
+ for_each_cpu(cpu_node) {
+ mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (mpidr == my_mpidr)
+ continue;
+
+ /* Verify that the other CPU is turned off */
+ state = tftf_psci_node_hw_state(mpidr, 0);
+ if (state != PSCI_HW_STATE_OFF) {
+ DBGMSG("%s: before: mpidr %llx: state %u, expected %u\n",
+ __func__, (unsigned long long) mpidr,
+ state, PSCI_HW_STATE_OFF);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Power on the CPU and wait for its event */
+ pos = platform_get_core_pos(mpidr);
+ ret = tftf_cpu_on(mpidr, (uintptr_t) cpu_ping, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ DBGMSG("%s: powering on %llx failed", __func__,
+ (unsigned long long)mpidr);
+ return TEST_RESULT_FAIL;
+ }
+ tftf_wait_for_event(&cpu_booted[pos]);
+
+ /* Verify that the other CPU is turned on */
+ state = tftf_psci_node_hw_state(mpidr, 0);
+ if (state != PSCI_HW_STATE_ON) {
+ DBGMSG("%s: after: mpidr %llx: state %u, expected %u\n",
+ __func__, (unsigned long long)mpidr,
+ state, PSCI_HW_STATE_ON);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Allow to the CPU to proceed to power down */
+ tftf_send_event(&cpu_continue[pos]);
+ }
+
+ /* Wait for other CPUs to power down */
+ INFO("%s: waiting for all other CPUs to power down\n", __func__);
+ for_each_cpu(cpu_node) {
+ mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (mpidr == my_mpidr)
+ continue;
+
+ /* Loop until other CPU is powered down */
+ while (tftf_psci_affinity_info(mpidr, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ tftf_timer_sleep(10);
+ }
+
+ /* Now verify that all CPUs have powered off */
+ for_each_cpu(cpu_node) {
+ mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (mpidr == my_mpidr)
+ continue;
+
+ /* Verify that the other CPU is turned off */
+ state = tftf_psci_node_hw_state(mpidr, 0);
+ if (state != PSCI_HW_STATE_OFF) {
+ DBGMSG("%s: mpidr %llx: state %u, expected %u\n",
+ __func__, (unsigned long long)mpidr,
+ state, PSCI_HW_STATE_OFF);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Find a peer CPU in the system. The 'foreign' argument specifies where to
+ * locate the peer CPU: value zero finds a CPU in the same cluster; non-zero
+ * argument finds CPU from a different cluster.
+ */
+static u_register_t find_peer(int foreign)
+{
+ int dmn, cpu;
+ u_register_t mpidr, my_mpidr;
+
+ my_mpidr = read_mpidr_el1() & MPID_MASK;
+ dmn = PWR_DOMAIN_INIT;
+ cpu = PWR_DOMAIN_INIT;
+ /* Iterate peer domains until a CPU other than the caller is found
+ * (or the domain iterator wraps back to PWR_DOMAIN_INIT) */
+ do {
+ dmn = tftf_get_next_peer_domain(dmn, foreign);
+ if (foreign) {
+ /* Foreign domain is a cluster: pick its first CPU */
+ cpu = tftf_get_next_cpu_in_pwr_domain(dmn,
+ PWR_DOMAIN_INIT);
+ } else {
+ cpu = dmn;
+ }
+
+ assert(cpu != PWR_DOMAIN_INIT);
+ mpidr = tftf_get_mpidr_from_node(cpu);
+ assert(mpidr != INVALID_MPID);
+ } while (mpidr == my_mpidr && dmn != PWR_DOMAIN_INIT);
+
+ return mpidr;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI NODE_HW_STATE API
+ *
+ * Runs the single-cluster sub-tests in sequence; each TEST_FUNC invocation
+ * returns early on the first failure.
+ */
+test_result_t test_psci_node_hw_state(void)
+{
+ DBGMSG("%s: begin\n", __func__);
+ if (!is_psci_node_hw_state_supported()) {
+ tftf_testcase_printf("PSCI NODE_HW_STATE is not supported\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ TEST_FUNC(test_invalid_mpidr);
+ TEST_FUNC(test_invalid_power_level);
+ TEST_FUNC(test_self_cpu);
+ TEST_FUNC(test_self_cluster);
+ TEST_FUNC(test_online_all);
+
+ DBGMSG("%s: end\n", __func__);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI NODE_HW_STATE API in systems with more than one
+ * cluster
+ */
+test_result_t test_psci_node_hw_state_multi(void)
+{
+ SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(2);
+
+ DBGMSG("%s: begin\n", __func__);
+ if (!is_psci_node_hw_state_supported()) {
+ tftf_testcase_printf("PSCI NODE_HW_STATE is not supported\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Initialize peer MPIDRs */
+ native_peer = find_peer(0);
+ foreign_peer = find_peer(1);
+ /* u_register_t must be widened and printed with %llx ("%x" was a
+ * format mismatch), matching the other DBGMSG call sites */
+ DBGMSG("native=%llx foreign=%llx\n",
+ (unsigned long long) native_peer,
+ (unsigned long long) foreign_peer);
+
+ TEST_FUNC(test_offline_cpu);
+ TEST_FUNC(test_offline_cluster);
+
+ DBGMSG("%s: end\n", __func__);
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c
new file mode 100644
index 0000000..64f6c81
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c
@@ -0,0 +1,1004 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <cassert.h>
+#include <debug.h>
+#include <events.h>
+#include <irq.h>
+#include <math_utils.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <spinlock.h>
+#include <string.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+/* Snapshot of one power state's statistics, as returned by the
+ * PSCI_STAT_COUNT and PSCI_STAT_RESIDENCY calls */
+typedef struct psci_stat_data {
+ u_register_t count; /* from tftf_psci_stat_count() */
+ u_register_t residency; /* from tftf_psci_stat_residency() */
+} psci_stat_data_t;
+
+/* Assuming 3 power levels as maximum */
+#define MAX_STAT_STATES (PLAT_MAX_PWR_STATES_PER_LVL * \
+ PLAT_MAX_PWR_STATES_PER_LVL * \
+ PLAT_MAX_PWR_STATES_PER_LVL)
+
+/* The 3-level assumption above breaks if PLAT_MAX_PWR_LEVEL exceeds 2 */
+CASSERT(PLAT_MAX_PWR_LEVEL <= 2, assert_maximum_defined_stat_array_size_exceeded);
+
+/*
+ * The data structure holding stat information as queried by each CPU.
+ * We don't worry about cache line thrashing.
+ */
+static psci_stat_data_t stat_data[PLATFORM_CORE_COUNT][PLAT_MAX_PWR_LEVEL + 1]
+ [MAX_STAT_STATES];
+
+/*
+ * Synchronization event for stat tests. A 2-D event array is used to
+ * signal every CPU by each CPU. This caters for the fact some
+ * CPUs may have higher performance than others and will not
+ * cause the synchronization to fail.
+ */
+static event_t stat_sync[PLATFORM_CORE_COUNT][PLATFORM_CORE_COUNT];
+
+/* Global variables to synchronize participating CPUs on wake-up */
+static spinlock_t cpu_count_lock;
+static volatile int cpu_count;
+static volatile int participating_cpu_count;
+
+/* Helper function to wait for CPUs participating in the test */
+static void wait_for_participating_cpus(void)
+{
+ assert(participating_cpu_count <= PLATFORM_CORE_COUNT);
+
+ /* Register this CPU's arrival under the lock */
+ spin_lock(&cpu_count_lock);
+ cpu_count++;
+ spin_unlock(&cpu_count_lock);
+
+ assert(cpu_count <= PLATFORM_CORE_COUNT);
+
+ /* Busy-wait until every participant has checked in; cpu_count is
+ * volatile, so it is re-read on each iteration */
+ while (cpu_count != participating_cpu_count)
+ ;
+}
+
+/* Helper function to detect support for PSCI STAT APIs in firmware.
+ * Returns 1 when both STAT_COUNT and STAT_RESIDENCY are implemented. */
+static int is_psci_stat_supported(void)
+{
+ int ret_stat_count, ret_stat_res;
+
+ ret_stat_count = tftf_get_psci_feature_info(SMC_PSCI_STAT_COUNT64);
+ ret_stat_res = tftf_get_psci_feature_info(SMC_PSCI_STAT_RESIDENCY64);
+
+ if (ret_stat_count == PSCI_E_NOT_SUPPORTED ||
+ ret_stat_res == PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("PSCI STAT APIs are not supported"
+ " in EL3 firmware\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Utility function to get the `stat` index into the `psci_stat_data_t` array
+ * (which is the index into the 3rd dimension in the array).
+ */
+static int get_stat_idx(unsigned int pstateid_idx[], unsigned int lvl)
+{
+ int i, stat_idx;
+ /* Calculate the stat_idx: treat pstateid_idx[0..lvl] as the digits of
+ * a base-PLAT_MAX_PWR_STATES_PER_LVL number */
+ for (stat_idx = 0, i = lvl; i >= 0; i--) {
+ assert((pstateid_idx[i] != PWR_STATE_INIT_INDEX) &&
+ (pstateid_idx[i] < PLAT_MAX_PWR_STATES_PER_LVL));
+ stat_idx += (pstateid_idx[i] * pow(PLAT_MAX_PWR_STATES_PER_LVL, i));
+ }
+
+ assert(stat_idx >= 0 && stat_idx < MAX_STAT_STATES);
+ return stat_idx;
+}
+
+/*
+ * Return the pointer the `psci_stat_data_t` array corresponding to
+ * cpu index, power level and `stat` index (which is computed from
+ * the pstateid_idx[]).
+ */
+static psci_stat_data_t *get_psci_stat_data(int cpu_idx,
+ unsigned int pwrlvl,
+ unsigned int pstateid_idx[])
+{
+ int stat_idx;
+
+ /* Calculate the stat_idx */
+ stat_idx = get_stat_idx(pstateid_idx, pwrlvl);
+ return &stat_data[cpu_idx][pwrlvl][stat_idx];
+}
+
+/*
+ * This function validates the current stat results against a previous
+ * snapshot of stat information gathered in `stat_data` by
+ * populate_all_stats_all_lvls() function. It does 2 kinds of validation:
+ *
+ * 1. Precise validation:
+ * This ensures that the targeted power state as indicated by `pstateid_idx`
+ * has incremented according to expectation. If it hasn't incremented,
+ * then the targeted power state was downgraded by the platform (due to various
+ * reasons) and in this case the queried stats should be equal to previous
+ * stats.
+ *
+ * This validation is done for the targeted power level and all lower levels
+ * for the given power state.
+ *
+ * 2. Imprecise validation:
+ *
+ * Iterate over all the power states and ensure that the previous stats for the
+ * power state are never higher than the current one for power levels <=
+ * targeted power level. For power states at higher power levels than targeted
+ * power level, it should remain the same.
+ *
+ * Returns 0 on success, -1 on any validation failure.
+ */
+static int validate_stat_result(unsigned int pstateid_idx[],
+ unsigned int target_pwrlvl)
+{
+ unsigned long my_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_idx = platform_get_core_pos(my_mpid);
+ unsigned int pwrlvl, susp_type, state_id, power_state;
+ int ret;
+ psci_stat_data_t current_stat_data;
+ psci_stat_data_t *pstat_data;
+ unsigned int local_pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+
+ assert(pstateid_idx[0] != PWR_STATE_INIT_INDEX);
+
+ /* Create a local copy of pstateid_idx */
+ memcpy(local_pstateid_idx, pstateid_idx, sizeof(local_pstateid_idx));
+
+ /* First do the precise validation */
+ do {
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ local_pstateid_idx);
+ assert(ret == PSCI_E_SUCCESS);
+ assert(pwrlvl <= PLAT_MAX_PWR_LEVEL);
+
+ power_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+
+ /* Get current stat values for the power state */
+ current_stat_data.residency = tftf_psci_stat_residency(my_mpid, power_state);
+ current_stat_data.count = tftf_psci_stat_count(my_mpid, power_state);
+
+ pstat_data = get_psci_stat_data(cpu_idx, pwrlvl, local_pstateid_idx);
+ if ((pstat_data->residency == current_stat_data.residency) &&
+ (pstat_data->count == current_stat_data.count)) {
+ /*
+ * Targeted power state has been downgraded and the
+ * queried stats should be equal to previous stats
+ */
+ WARN("The power state 0x%x at pwrlvl %d has been"
+ " downgraded by platform\n",
+ power_state, pwrlvl);
+ } else if ((pstat_data->residency > current_stat_data.residency) ||
+ (pstat_data->count + 1 != current_stat_data.count)) {
+ /*
+ * The previous residency is greater than current or the
+ * stat count has not incremented by 1 for the targeted
+ * power state. Return error in this case.
+ */
+ ERROR("Precise validation failed. Stats for CPU %d at"
+ " pwrlvl %d for power state 0x%x : Prev"
+ " stats 0x%llx 0x%llx, current stats"
+ " 0x%llx 0x%llx\n",
+ cpu_idx, pwrlvl, power_state,
+ (unsigned long long)pstat_data->residency,
+ (unsigned long long)pstat_data->count,
+ (unsigned long long)current_stat_data.residency,
+ (unsigned long long)current_stat_data.count);
+ return -1;
+ } else {
+ /*
+ * The stats are as expected for the targeted power state
+ * i.e previous residency <= current residency and
+ * previous stat count + 1 == current stat count.
+ */
+ INFO("The difference in programmed time and residency"
+ " time in us = %lld at power level %d\n",
+ (unsigned long long)
+ ((current_stat_data.residency - pstat_data->residency)
+ - (PLAT_SUSPEND_ENTRY_TIME * 1000)), pwrlvl);
+ }
+
+ /* Invalidate this level's index so the next iteration
+ * presumably resolves to the next lower power level —
+ * loop ends once level 0 has been validated */
+ local_pstateid_idx[pwrlvl] = PWR_STATE_INIT_INDEX;
+ } while (pwrlvl);
+
+ /* Reset the index to walk over every power state from scratch */
+ INIT_PWR_LEVEL_INDEX(local_pstateid_idx);
+
+ /* Imprecise validation */
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, local_pstateid_idx);
+ if (local_pstateid_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ local_pstateid_idx);
+ if (ret != PSCI_E_SUCCESS)
+ continue;
+
+ assert(pwrlvl <= PLAT_MAX_PWR_LEVEL);
+
+ power_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+
+ pstat_data = get_psci_stat_data(cpu_idx, pwrlvl, local_pstateid_idx);
+
+ current_stat_data.residency = tftf_psci_stat_residency(my_mpid,
+ power_state);
+ current_stat_data.count = tftf_psci_stat_count(my_mpid,
+ power_state);
+ if (pwrlvl <= target_pwrlvl) {
+ /*
+ * For all power states that target power domain level
+ * <= `target_pwrlvl, the previous residency and count
+ * should never be greater than current.
+ */
+ if ((pstat_data->residency > current_stat_data.residency) ||
+ (pstat_data->count > current_stat_data.count)) {
+ ERROR("Imprecise validation failed for"
+ " pwrlvl <= target_pwrlvl. Stats for"
+ " CPU %d for power state 0x%x. Prev"
+ " stats 0x%llx 0x%llx, current stats 0x%llx"
+ " 0x%llx\n",
+ cpu_idx, power_state,
+ (unsigned long long)pstat_data->residency,
+ (unsigned long long)pstat_data->count,
+ (unsigned long long)current_stat_data.residency,
+ (unsigned long long)current_stat_data.count);
+ return -1;
+ }
+
+ } else {
+ /*
+ * For all power states that target power domain level
+ * > `target_pwrlvl, the previous residency and count
+ * should never be equal to current.
+ */
+ if ((pstat_data->residency != current_stat_data.residency) ||
+ (pstat_data->count != current_stat_data.count)) {
+ ERROR("Imprecise validation failed for pwrlvl >"
+ " target_pwrlvl. Stats for CPU"
+ " %d for power state 0x%x. Prev"
+ " stats 0x%llx 0x%llx, current stats"
+ " 0x%llx 0x%llx\n",
+ cpu_idx, power_state,
+ (unsigned long long)pstat_data->residency,
+ (unsigned long long)pstat_data->count,
+ (unsigned long long)current_stat_data.residency,
+ (unsigned long long)current_stat_data.count);
+ return -1;
+ }
+ }
+ } while (1);
+
+ return 0;
+}
+
+/*
+ * This function populates the stats for all power states at all power domain
+ * levels for the current CPU in the global `stat_data` array.
+ */
+static void populate_all_stats_all_lvls(void)
+{
+ unsigned int pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+ int ret;
+ unsigned int pwrlvl, susp_type, state_id, power_state;
+ psci_stat_data_t *pstat_data;
+ u_register_t mpidr = read_mpidr_el1() & MPID_MASK;
+ int cpu_idx = platform_get_core_pos(mpidr);
+
+ INIT_PWR_LEVEL_INDEX(pstateid_idx);
+ /* Iterate every power state index; the iterator wraps back to
+ * PWR_STATE_INIT_INDEX when exhausted */
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstateid_idx);
+ if (pstateid_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ pstateid_idx);
+ if (ret != PSCI_E_SUCCESS)
+ continue;
+
+ assert(pwrlvl <= PLAT_MAX_PWR_LEVEL);
+
+ power_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+
+ /* Snapshot residency and count for this power state */
+ pstat_data = get_psci_stat_data(cpu_idx, pwrlvl, pstateid_idx);
+ pstat_data->residency = tftf_psci_stat_residency(mpidr,
+ power_state);
+ pstat_data->count = tftf_psci_stat_count(mpidr, power_state);
+ } while (1);
+}
+
+/*
+ * The core function executed by all CPUs when `test_psci_stat_all_power_states`
+ * test is executed. Each CPU queries the next valid power state using the
+ * `power state` helpers and assumes that the power state progresses from lower
+ * power levels to higher levels. It also assumes that the number of applicable
+ * low power states are same across Big - Little clusters. In future this
+ * assumption may not be true and this test may need to be reworked to have
+ * `power domain` awareness. The sequence executed by the test is as follows:
+ *
+ * 1. Populate the stats for all power states at all power domain levels for
+ * the current CPU.
+ *
+ * 2. Each CPU queries the next applicable low power state using the
+ * `power state` helpers.
+ *
+ * 3. A synchronization point is created for all the CPUs taking part in the
+ * test using TFTF events. This is needed because low power states at higher
+ * power domain levels (like cluster) can only be achieved if all the CPUs
+ * within the power domain request the same low power state at the (nearly)
+ * same time.
+ *
+ * 4. Validate the current stats with the previously cached stats (done in
+ * Step 1).
+ *
+ * 5. Iterate for the next applicable power state.
+ */
+static test_result_t test_psci_stat(void)
+{
+ int ret;
+ unsigned int pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+ unsigned int pwrlvl, state_id, power_state, susp_type, cpu_node;
+ u_register_t mpidr = read_mpidr_el1() & MPID_MASK;
+ int cpu_idx = platform_get_core_pos(mpidr);
+
+ /* Initialize the per-cpu synchronization event */
+ for_each_cpu(cpu_node) {
+ u_register_t target_mpid;
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ tftf_init_event(&stat_sync[platform_get_core_pos(target_mpid)][cpu_idx]);
+ }
+
+ INIT_PWR_LEVEL_INDEX(pstateid_idx);
+
+ do {
+ /* Get the next power state */
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstateid_idx);
+ if (pstateid_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /*
+ * Populate the PSCI STATs for all power levels and all states.
+ * These cached values are compared against the live stats after
+ * the suspend/resume cycle below.
+ */
+ populate_all_stats_all_lvls();
+
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl, &susp_type, &state_id,
+ pstateid_idx);
+ if (ret != PSCI_E_SUCCESS)
+ continue;
+
+ power_state = tftf_make_psci_pstate(pwrlvl, susp_type, state_id);
+
+ /*
+ * Create a synchronization point. A 2-D event array is used to
+ * signal every CPU by each CPU. This caters for the fact some
+ * CPUs may have higher performance than others and will not
+ * cause the synchronization to fail.
+ */
+ for_each_cpu(cpu_node) {
+ unsigned int target_idx;
+ target_idx = platform_get_core_pos(
+ tftf_get_mpidr_from_node(cpu_node));
+ tftf_send_event(&stat_sync[target_idx][cpu_idx]);
+ tftf_wait_for_event(&stat_sync[cpu_idx][target_idx]);
+ }
+
+ /*
+ * Initialize the cpu_count to zero for synchronizing
+ * participating CPUs.
+ */
+ spin_lock(&cpu_count_lock);
+ if (cpu_count == participating_cpu_count)
+ cpu_count = 0;
+ spin_unlock(&cpu_count_lock);
+
+ /* Suspend to the requested power state; a timer wakes us up */
+ ret = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ power_state, NULL, NULL);
+ /* Ensure the wakeup timer cannot fire again later */
+ tftf_cancel_timer();
+ if (ret) {
+ ERROR("PSCI-STAT: Suspend failed. "
+ "mpidr:0x%llx pwr_lvl:0x%x powerstate:0x%x\n",
+ (unsigned long long)mpidr,
+ pwrlvl, power_state);
+ return TEST_RESULT_FAIL;
+ }
+
+
+ INFO("PSCI-STAT: mpidr:0x%llx pwr_lvl:0x%x powerstate:0x%x\n",
+ (unsigned long long)mpidr,
+ pwrlvl, power_state);
+
+ /* Re-synchronize all participants before validating the stats */
+ wait_for_participating_cpus();
+
+ ret = validate_stat_result(pstateid_idx, pwrlvl);
+ if (ret)
+ return TEST_RESULT_FAIL;
+ } while (1);
+
+ /*
+ * Populate the PSCI STATs for all power levels and all states
+ * for final validation from lead CPU
+ */
+ populate_all_stats_all_lvls();
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Validate, from the lead CPU, the stats of a (powered-off) target CPU.
+ * For each valid power state, the stats queried via the target CPU's MPIDR
+ * are compared against the values that CPU previously cached for itself.
+ *
+ * Returns 0 when every power state matches, -1 on the first mismatch.
+ */
+static int validate_stat_result_from_lead(u_register_t target_cpu)
+{
+	unsigned int lvl, type, id, pstate;
+	unsigned int target_idx = platform_get_core_pos(target_cpu);
+	unsigned int idx_stack[PLAT_MAX_PWR_LEVEL + 1];
+	psci_stat_data_t queried;
+	const psci_stat_data_t *cached;
+
+	INIT_PWR_LEVEL_INDEX(idx_stack);
+
+	for (;;) {
+		tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, idx_stack);
+		if (idx_stack[0] == PWR_STATE_INIT_INDEX)
+			break;
+
+		/* Ignore power states the platform deems invalid */
+		if (tftf_get_pstate_vars(&lvl, &type, &id, idx_stack) !=
+				PSCI_E_SUCCESS)
+			continue;
+
+		assert(lvl <= PLAT_MAX_PWR_LEVEL);
+
+		pstate = tftf_make_psci_pstate(lvl, type, id);
+
+		/* Query the target CPU's stats from the lead CPU */
+		queried.residency = tftf_psci_stat_residency(target_cpu, pstate);
+		queried.count = tftf_psci_stat_count(target_cpu, pstate);
+
+		/* Compare with what the target CPU cached for itself */
+		cached = get_psci_stat_data(target_idx, lvl, idx_stack);
+		if ((cached->residency == queried.residency) &&
+				(cached->count == queried.count))
+			continue;
+
+		INFO("Stats for CPU %d for power state 0x%x :"
+			" Recorded stats 0x%llx 0x%llx,"
+			" Target stats 0x%llx 0x%llx\n",
+			target_idx, pstate,
+			(unsigned long long)cached->residency,
+			(unsigned long long)cached->count,
+			(unsigned long long)queried.residency,
+			(unsigned long long)queried.count);
+		return -1;
+	}
+
+	return 0;
+}
+
+
+/*
+ * @Test_Aim@ Verify if PSCI Stat Count and Residency are updated
+ * correctly for all valid suspend states for every power domain at
+ * various power levels. `test_psci_stat()` is invoked by all the CPUs
+ * participating in the test. The final stats are also validated from the
+ * lead CPU to ensure that stats queried from current CPU and another (lead)
+ * CPU are the same.
+ */
+test_result_t test_psci_stat_all_power_states(void)
+{
+	u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+	u_register_t mpid;
+	int node, ret;
+
+	if (!is_psci_stat_supported())
+		return TEST_RESULT_SKIPPED;
+
+	/* Every CPU in the system takes part in this test */
+	participating_cpu_count = tftf_get_total_cpus_count();
+	init_spinlock(&cpu_count_lock);
+
+	/* Bring each secondary up into the test function */
+	for_each_cpu(node) {
+		mpid = tftf_get_mpidr_from_node(node);
+		if (mpid == lead_mpid)
+			continue;
+
+		ret = tftf_cpu_on(mpid, (uintptr_t)test_psci_stat, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+				(unsigned long long)mpid);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* The lead CPU runs the same test body itself */
+	if (test_psci_stat() != TEST_RESULT_SUCCESS)
+		return TEST_RESULT_FAIL;
+
+	INFO("Validating stats from lead CPU\n");
+
+	/* Cross-check each secondary's stats from the lead CPU */
+	for_each_cpu(node) {
+		mpid = tftf_get_mpidr_from_node(node);
+		if (mpid == lead_mpid)
+			continue;
+
+		/* Wait until the secondary has fully powered down */
+		while (tftf_psci_affinity_info(mpid, MPIDR_AFFLVL0) !=
+				PSCI_STATE_OFF)
+			;
+
+		if (validate_stat_result_from_lead(mpid) != 0)
+			return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Entry point for the secondary CPUs: rendezvous with the other
+ * participants, snapshot this CPU's PSCI stats for every power state,
+ * then return so the framework powers the CPU off.
+ */
+static test_result_t update_stats_and_power_off(void)
+{
+	/* Synchronize with all other participating CPUs */
+	wait_for_participating_cpus();
+
+	/* Cache this CPU's stats for later validation */
+	populate_all_stats_all_lvls();
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * The target power level for the stats validation: in
+ * `verify_powerdown_stats()`, stats for at least one power state targeted
+ * to a level <= this value must have incremented for the check to pass.
+ */
+static int verify_stats_target_lvl;
+
+/*
+ * This is lighter validation of stat results than `validate_stat_result()`.
+ * This function iterates over all the power states of type
+ * `PSTATE_TYPE_POWERDOWN` for the current CPU and ensures that stats
+ * corresponding to at least a single power state targeted to a power level
+ * <= `verify_stats_target_lvl` have incremented as expected. If the stats
+ * haven't incremented corresponding to a power state, then they must be
+ * equal to the previous stats or else return FAILURE.
+ */
+static test_result_t verify_powerdown_stats(void)
+{
+ int ret;
+ unsigned int stateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+ unsigned int pwrlvl, susp_type, state_id, power_state;
+ psci_stat_data_t *pstat_data, curr_stat_data;
+ u_register_t mpidr = read_mpidr_el1() & MPID_MASK;
+ int cpu_idx = platform_get_core_pos(mpidr);
+
+ /* Fails unless at least one qualifying power state is found below */
+ test_result_t result = TEST_RESULT_FAIL;
+
+ INIT_PWR_LEVEL_INDEX(stateid_idx);
+
+ /* Synchronize with the other participating CPUs */
+ wait_for_participating_cpus();
+
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, stateid_idx);
+ if (stateid_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* Check if the power state is valid (and of powerdown type) */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ stateid_idx);
+ if ((ret != PSCI_E_SUCCESS) ||
+ (susp_type != PSTATE_TYPE_POWERDOWN))
+ continue;
+
+ power_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+ pstat_data = get_psci_stat_data(cpu_idx, pwrlvl, stateid_idx);
+ curr_stat_data.residency = tftf_psci_stat_residency(mpidr,
+ power_state);
+ curr_stat_data.count = tftf_psci_stat_count(mpidr, power_state);
+
+ /* Count incremented exactly once and residency did not shrink */
+ if ((curr_stat_data.count == (pstat_data->count + 1)) &&
+ (curr_stat_data.residency >= pstat_data->residency)) {
+ /*
+ * If the stats for at least a single power state
+ * targeted to a pwrlvl <= `verify_stats_target_lvl`
+ * satisfy the condition then set result to SUCCESS
+ */
+ if (verify_stats_target_lvl >= pwrlvl)
+ result = TEST_RESULT_SUCCESS;
+ } else if ((curr_stat_data.count != pstat_data->count) ||
+ (curr_stat_data.residency != pstat_data->residency)) {
+
+ /*
+ * If the stats haven't incremented, then they should be
+ * equal to previous.
+ */
+ ERROR("Stats for CPU %d for power state 0x%x :"
+ " Recorded stats 0x%llx 0x%llx,"
+ " current stats 0x%llx 0x%llx\n",
+ cpu_idx, power_state,
+ (unsigned long long)pstat_data->residency,
+ (unsigned long long)pstat_data->count,
+ (unsigned long long)curr_stat_data.residency,
+ (unsigned long long)curr_stat_data.count);
+
+ return TEST_RESULT_FAIL;
+ }
+ } while (1);
+
+ return result;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI stats after calling CPU_OFF on each secondary core.
+ * The test sequence is as follows:
+ *
+ * 1. Invoke CPU_ON on all secondary cores which wakeup and update
+ * the stats corresponding to all power states.
+ *
+ * 2. After secondaries have turned OFF, suspend the lead CPU for a short time
+ * duration.
+ *
+ * 3. On wakeup from suspend, turn ON all secondaries which then validate the
+ * stats by invoking `verify_powerdown_stats()`.
+ */
+test_result_t test_psci_stats_cpu_off(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, off_cpu_count = 0, ret;
+ unsigned int power_state, stateid;
+
+ if (!is_psci_stat_supported())
+ return TEST_RESULT_SKIPPED;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /*
+ * The primary CPU is an external observer in this test.
+ * Count it out of the participating CPUs pool.
+ */
+ participating_cpu_count = tftf_get_total_cpus_count() - 1;
+ init_spinlock(&cpu_count_lock);
+ cpu_count = 0;
+
+ /* Turn on each secondary and update the stats. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (lead_mpid == target_mpid)
+ continue;
+
+ /*
+ * cpu_count will be updated by the secondary CPUs when they
+ * execute `update_stats_and_power_off` function.
+ */
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) update_stats_and_power_off, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+
+ }
+
+ /* Wait for the secondaries to turn OFF */
+ for_each_cpu(cpu_node) {
+
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ /* Spin until the firmware reports the CPU as OFF */
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ ;
+ off_cpu_count++;
+ }
+
+ assert(off_cpu_count == participating_cpu_count);
+ /* Reset the rendezvous counter for the verification phase below */
+ cpu_count = 0;
+
+ /* Build a CPU-level standby composite state for the lead CPU */
+ ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+ PSTATE_TYPE_STANDBY, &stateid);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("Failed to construct composite state\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+ PSTATE_TYPE_STANDBY, stateid);
+ ret = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ power_state, NULL, NULL);
+ if (ret != 0) {
+ ERROR("Failed to program timer or suspend CPU: 0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ tftf_cancel_timer();
+
+ /* The target level for CPU OFF is 0 (CPU power domain level) */
+ verify_stats_target_lvl = 0;
+
+ /* Now turn on each secondary and verify the stats */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (lead_mpid == target_mpid)
+ continue;
+
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) verify_powerdown_stats, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Wait for the secondaries to turn OFF */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ ;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI stats after SYSTEM SUSPEND.
+ * The test sequence is as follows:
+ *
+ * 1. Invoke CPU_ON on all secondary cores which wakeup and update
+ * the stats corresponding to all power states.
+ *
+ * 2. After secondaries have turned OFF, invoke SYSTEM SUSPEND on the lead CPU.
+ *
+ * 3. On wakeup from suspend, turn ON all secondaries which then validate the
+ * stats by invoking `verify_powerdown_stats()`.
+ */
+test_result_t test_psci_stats_system_suspend(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, off_cpu_count = 0;
+ int ret;
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_SYSTEM_SUSPEND64);
+ if (ret == PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("SYSTEM_SUSPEND not supported"
+ " in EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (!is_psci_stat_supported())
+ return TEST_RESULT_SKIPPED;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /* Initialize participating CPU count. The lead CPU is excluded in the count */
+ participating_cpu_count = tftf_get_total_cpus_count() - 1;
+ init_spinlock(&cpu_count_lock);
+ cpu_count = 0;
+
+ /* Turn on each secondary and update the stats. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (lead_mpid == target_mpid)
+ continue;
+
+ /*
+ * cpu_count will be updated by the secondary CPUs when they
+ * execute `update_stats_and_power_off` function.
+ */
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) update_stats_and_power_off, 0);
+ if (ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait for the secondaries to turn OFF */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ /* Spin until the firmware reports the CPU as OFF */
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ ;
+
+ off_cpu_count++;
+ }
+
+ assert(off_cpu_count == participating_cpu_count);
+ /* Reset the rendezvous counter for the verification phase below */
+ cpu_count = 0;
+
+ /* Update the stats corresponding to the lead CPU as well */
+ populate_all_stats_all_lvls();
+
+ /* Program timer to fire after delay and issue system suspend */
+ ret = tftf_program_timer_and_sys_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ NULL, NULL);
+ tftf_cancel_timer();
+ if (ret)
+ return TEST_RESULT_FAIL;
+
+ /* The target level for SYSTEM SUSPEND is PLAT_MAX_PWR_LEVEL */
+ verify_stats_target_lvl = PLAT_MAX_PWR_LEVEL;
+
+ /* Now turn on each secondary CPU and verify the stats */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (lead_mpid == target_mpid)
+ continue;
+
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) verify_powerdown_stats, 0);
+ if (ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait for the secondary CPUs to turn OFF */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ ;
+ }
+
+ /* Increment the participating CPU count to include the lead CPU as well */
+ participating_cpu_count++;
+
+ /* Verify the stats on the lead CPU as well */
+ return verify_powerdown_stats();
+}
+
+/*
+ * Check that the PSCI stats for every valid power state of every CPU are
+ * zero, as expected immediately after a cold boot.
+ *
+ * As part of its initialization, the test framework puts the primary CPU
+ * through CPU standby (to detect the power state format), which bumps that
+ * CPU's standby statistics. That particular state is therefore skipped.
+ */
+static test_result_t verify_psci_stats_cold_boot(void)
+{
+	int node;
+	unsigned int idx_arr[PLAT_MAX_PWR_LEVEL + 1];
+	unsigned int lvl, type, id, pstate;
+	u_register_t mpid, lead_mpid, count, residency;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	for_each_cpu(node) {
+		mpid = tftf_get_mpidr_from_node(node);
+
+		INIT_PWR_LEVEL_INDEX(idx_arr);
+		for (;;) {
+			tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL,
+					idx_arr);
+			if (idx_arr[0] == PWR_STATE_INIT_INDEX)
+				break;
+
+			/* Skip invalid power states */
+			if (tftf_get_pstate_vars(&lvl, &type, &id, idx_arr) !=
+					PSCI_E_SUCCESS)
+				continue;
+
+			/* The framework already used CPU standby on the lead CPU */
+			if ((mpid == lead_mpid) && (lvl == 0) &&
+					(type == PSTATE_TYPE_STANDBY))
+				continue;
+
+			pstate = tftf_make_psci_pstate(lvl, type, id);
+			residency = tftf_psci_stat_residency(mpid, pstate);
+			count = tftf_psci_stat_count(mpid, pstate);
+
+			/* Any non-zero value right after cold boot is a failure */
+			if ((count != 0) || (residency != 0)) {
+				ERROR("mpid = %lld, power_state = %x, "
+					"stat count = %lld, residency = %lld\n",
+					(unsigned long long) mpid,
+					pstate,
+					(unsigned long long) count,
+					(unsigned long long) residency);
+				return TEST_RESULT_FAIL;
+			}
+		}
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI stats for each valid composite
+ * power state after system shutdown
+ */
+test_result_t test_psci_stats_after_shutdown(void)
+{
+	smc_args off_args = { SMC_PSCI_SYSTEM_OFF };
+
+	if (!is_psci_stat_supported())
+		return TEST_RESULT_SKIPPED;
+
+	/* Second pass, after the reboot: all stats must be back to zero */
+	if (tftf_is_rebooted())
+		return verify_psci_stats_cold_boot();
+
+	/* First pass: record the impending reboot, then shut down */
+	tftf_notify_reboot();
+	tftf_smc(&off_args);
+
+	/* The PSCI SYSTEM_OFF call is not supposed to return */
+	tftf_testcase_printf("System didn't shutdown properly\n");
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI stats for each valid composite
+ * power state after system reset
+ */
+test_result_t test_psci_stats_after_reset(void)
+{
+	smc_args reset_args = { SMC_PSCI_SYSTEM_RESET };
+
+	if (!is_psci_stat_supported())
+		return TEST_RESULT_SKIPPED;
+
+	/* Second pass, after the reset: all stats must be back to zero */
+	if (tftf_is_rebooted())
+		return verify_psci_stats_cold_boot();
+
+	/* First pass: record the impending reboot, then reset */
+	tftf_notify_reboot();
+	tftf_smc(&reset_args);
+
+	/* The PSCI SYSTEM_RESET call is not supposed to return */
+	tftf_testcase_printf("System didn't reset properly\n");
+	return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_version/test_psci_version.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_version/test_psci_version.c
new file mode 100644
index 0000000..27f2a0a
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_version/test_psci_version.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <psci.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Check the version of PSCI implemented
+ *
+ * This test relies on the PSCI_VERSION SMC call.
+ * It expects versions 0.1, 0.2, 1.0, 1.1
+ */
+test_result_t test_psci_version(void)
+{
+	int ver = tftf_get_psci_version();
+
+	if (tftf_is_valid_psci_version(ver))
+		return TEST_RESULT_SUCCESS;
+
+	tftf_testcase_printf(
+		"Wrong PSCI version:0x%08x\n", ver);
+	return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/reset2/reset2.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/reset2/reset2.c
new file mode 100644
index 0000000..dfc1b35
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/reset2/reset2.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <psci.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+/* Byte pattern planted at the sentinel address before resetting */
+#define SENTINEL 0x55
+/* Reserved architectural reset type (bit 31 clear => architectural) */
+#define INVALID_ARCH_RESET 0x00000001
+/* Unimplemented vendor-specific reset type (bit 31 set => vendor) */
+#define INVALID_VENDOR_RESET 0x80000002
+/* Values returned by psci_mem_protect() describing the previous state */
+#define MEM_PROTECT_ENABLE 1
+#define MEM_PROTECT_DISABLE 0
+
+
+/*
+ * Test warm reset using PSCI RESET2 call (parameter 0).
+ * On the pre-reboot pass, a sentinel byte is written and the reset issued;
+ * on the post-reboot pass, the sentinel is checked to still be intact.
+ * Returns:
+ *	TEST_RESULT_SUCCESS: The system resets after calling RESET2
+ *	TEST_RESULT_FAIL: The RESET2 PSCI call failed
+ */
+static test_result_t reset2_warm_helper(void *arg)
+{
+	smc_args reset_args = { SMC_PSCI_RESET2, 0 };
+	unsigned char *sentinel = arg;
+
+	assert(sentinel != NULL);
+
+	if (tftf_is_rebooted()) {
+		/* Second pass: memory must have been preserved */
+		if (*sentinel != SENTINEL) {
+			tftf_testcase_printf("Sentinel address modified\n");
+			return TEST_RESULT_FAIL;
+		}
+		return TEST_RESULT_SUCCESS;
+	}
+
+	/* First pass: plant the sentinel and trigger the warm reset */
+	*sentinel = SENTINEL;
+	tftf_notify_reboot();
+	tftf_smc(&reset_args);
+
+	/* The PSCI RESET2 call is not supposed to return */
+	tftf_testcase_printf("System didn't shutdown properly\n");
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * Entry point for the warm-reset test: locate the sentinel page, map it
+ * read-write, and run reset2_warm_helper() against it.
+ */
+test_result_t reset2_warm(void)
+{
+	map_args_unmap_t map_args;
+	unsigned char *sentinel;
+	int feat;
+
+	feat = tftf_get_psci_feature_info(SMC_PSCI_RESET2);
+	if (feat == PSCI_E_NOT_SUPPORTED) {
+		tftf_testcase_printf("PSCI RESET2 is not supported %d\n", feat);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	sentinel = psci_mem_prot_get_sentinel();
+	if (sentinel == NULL) {
+		tftf_testcase_printf("Could not find a suitable address for the sentinel.\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Map the page containing the sentinel as read-write data */
+	map_args.addr = (uintptr_t)sentinel & ~PAGE_SIZE_MASK;
+	map_args.size = PAGE_SIZE;
+	map_args.attr = MT_RW_DATA;
+	map_args.arg = sentinel;
+
+	return map_test_unmap(&map_args, reset2_warm_helper);
+}
+
+/*
+ * Test correct error handling of PSCI RESET2 for invalid reset types.
+ * Returns:
+ *	TEST_RESULT_SUCCESS: If the system catches all the wrong calls
+ *	TEST_RESULT_FAIL: Some PSCI call failed
+ */
+test_result_t reset2_test_invalid(void)
+{
+	smc_args args = { SMC_PSCI_RESET2 };
+	smc_ret_values smc_ret;
+
+	if (tftf_get_psci_feature_info(SMC_PSCI_RESET2) ==
+			PSCI_E_NOT_SUPPORTED)
+		return TEST_RESULT_SKIPPED;
+
+	/* A vendor-specific reset type no platform implements: must error */
+	args.arg1 = INVALID_VENDOR_RESET;
+	smc_ret = tftf_smc(&args);
+	if ((int)smc_ret.ret0 >= 0)
+		return TEST_RESULT_FAIL;
+
+	/* A reserved architectural reset type: must error as well */
+	args.arg1 = INVALID_ARCH_RESET;
+	smc_ret = tftf_smc(&args);
+	if ((int)smc_ret.ret0 >= 0)
+		return TEST_RESULT_FAIL;
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test combination of RESET2 and MEM_PROTECT.
+ * Pre-reboot pass: plant the sentinel, enable/query mem_protect and issue a
+ * warm reset. Post-reboot pass: verify that the reset disabled mem_protect
+ * and that the sentinel survived.
+ * Returns:
+ *	TEST_RESULT_SUCCESS: if the system is reset and mem_protect
+ *				is disabled.
+ *	TEST_RESULT_FAIL: Some PSCI call failed or mem_protect wasn't
+ *				disabled
+ */
+static test_result_t reset2_mem_protect_helper(void *arg)
+{
+	int ret;
+	unsigned char value;
+	smc_args args = { SMC_PSCI_RESET2, 0};
+	unsigned char *sentinel = arg;
+
+	assert(sentinel != NULL);
+
+	ret = tftf_get_psci_feature_info(SMC_PSCI_MEM_PROTECT);
+	if (ret == PSCI_E_NOT_SUPPORTED)
+		return TEST_RESULT_SKIPPED;
+
+	if (tftf_is_rebooted()) {
+		/*
+		 * RESET2 must leave mem_protect disabled. psci_mem_protect()
+		 * returns the previous enable state, so anything other than
+		 * "disabled" here means the reset failed to clear it.
+		 * (Bug fix: this path previously returned SUCCESS despite
+		 * printing the failure diagnostic.)
+		 */
+		if (psci_mem_protect(0) != MEM_PROTECT_DISABLE) {
+			tftf_testcase_printf("mem_protect is not disabled");
+			return TEST_RESULT_FAIL;
+		}
+		value = *sentinel;
+		if (value != SENTINEL) {
+			tftf_testcase_printf("Sentinel address modified\n");
+			return TEST_RESULT_FAIL;
+		}
+		return TEST_RESULT_SUCCESS;
+	}
+
+	*sentinel = SENTINEL;
+
+	/* Query/set mem_protect; any other return value is an error */
+	ret = psci_mem_protect(0);
+	if (ret != MEM_PROTECT_ENABLE && ret != MEM_PROTECT_DISABLE) {
+		tftf_testcase_printf("error calling mem_protect");
+		return TEST_RESULT_FAIL;
+	}
+
+	tftf_notify_reboot();
+	tftf_smc(&args);
+
+	/* The PSCI RESET2 call is not supposed to return */
+	tftf_testcase_printf("System didn't shutdown properly\n");
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * Entry point for the RESET2 + MEM_PROTECT test: locate the sentinel page,
+ * map it read-write, and run reset2_mem_protect_helper() against it.
+ */
+test_result_t reset2_mem_protect(void)
+{
+	map_args_unmap_t map_args;
+	unsigned char *sentinel;
+	int feat;
+
+	feat = tftf_get_psci_feature_info(SMC_PSCI_RESET2);
+	if (feat == PSCI_E_NOT_SUPPORTED) {
+		tftf_testcase_printf("PSCI RESET2 is not supported %d\n", feat);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	sentinel = psci_mem_prot_get_sentinel();
+	if (sentinel == NULL) {
+		tftf_testcase_printf("Could not find a suitable address for the sentinel.\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Map the page containing the sentinel as read-write data */
+	map_args.addr = (uintptr_t)sentinel & ~PAGE_SIZE_MASK;
+	map_args.size = PAGE_SIZE;
+	map_args.attr = MT_RW_DATA;
+	map_args.arg = sentinel;
+
+	return map_test_unmap(&map_args, reset2_mem_protect_helper);
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/system_off/test_system_off.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/system_off/test_system_off.c
new file mode 100644
index 0000000..2ac6550
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/system_off/test_system_off.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <psci.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Validate the SYSTEM_OFF call.
+ * Test SUCCESS in case of system shutdown.
+ * Test FAIL in case of execution not terminated.
+ */
+test_result_t test_system_off(void)
+{
+	smc_args off_args = { SMC_PSCI_SYSTEM_OFF };
+
+	/* Second pass: we came back after the shutdown, so it worked */
+	if (tftf_is_rebooted())
+		return TEST_RESULT_SUCCESS;
+
+	/* First pass: record the impending reboot, then power off */
+	tftf_notify_reboot();
+	tftf_smc(&off_args);
+
+	/* The PSCI SYSTEM_OFF call is not supposed to return */
+	tftf_testcase_printf("System didn't shutdown properly\n");
+	return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/system_suspend/test_psci_system_suspend.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/system_suspend/test_psci_system_suspend.c
new file mode 100644
index 0000000..04c5bf8
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/system_suspend/test_psci_system_suspend.c
@@ -0,0 +1,885 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <events.h>
+#include <gic_v2.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <test_helpers.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+/* Wake-up timer intervals (values suggest milliseconds — TODO confirm) */
+#define SUSPEND_TIME_3_SECS 3000
+#define SUSPEND_TIME_10_SECS 10000
+/* Number of suspend/resume cycles in the repeated-suspend test */
+#define TEST_ITERATION_COUNT 0x5
+
+/* Helper macro to verify if system suspend API is supported */
+#define is_psci_sys_susp64_supported() \
+ (tftf_get_psci_feature_info(SMC_PSCI_SYSTEM_SUSPEND64) != \
+ PSCI_E_NOT_SUPPORTED)
+
+/* Deepest composite power state found by get_deepest_power_state() */
+static unsigned int deepest_power_state;
+/* Topology node of the CPU the test is handed over to next */
+static unsigned int test_target_node = PWR_DOMAIN_INIT;
+/* Per-CPU events used to synchronise lead and non-lead CPUs */
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+static event_t sgi_received[PLATFORM_CORE_COUNT];
+static event_t waitq[PLATFORM_CORE_COUNT];
+
+/* Per-CPU flags set from IRQ handlers and polled by the tests */
+static volatile int wakeup_irq_rcvd[PLATFORM_CORE_COUNT];
+static volatile unsigned int sgi_handled[PLATFORM_CORE_COUNT];
+/* Payload of the last SGI received (written by sgi_handler) */
+static sgi_data_t sgi_data;
+/* Number of CPUs currently participating in the test */
+static volatile int cpu_ref_count;
+
+/* Bounds of the TFTF read-only section, provided by the linker script */
+extern unsigned long __RO_START__;
+#define TFTF_RO_START (unsigned long)(&__RO_START__)
+extern unsigned long __RO_END__;
+#define TFTF_RO_END (unsigned long)(&__RO_END__)
+
+/*
+ * Handler for the wake-up timer interrupt.
+ * Flags reception of the wake-up IRQ for the calling CPU so that the
+ * polling loops in the tests can make progress.
+ */
+static int suspend_wakeup_handler(void *data)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ /* The flag must have been cleared before arming the timer */
+ assert(wakeup_irq_rcvd[core_pos] == 0);
+ wakeup_irq_rcvd[core_pos] = 1;
+
+ return 0;
+}
+
+/*
+ * Handler for the non-secure SGI used by these tests.
+ * Saves the received SGI data and flags completion for the calling CPU.
+ * NOTE(review): sgi_data is a single global shared by all CPUs — this
+ * assumes at most one CPU handles an SGI at a time; verify against callers.
+ */
+static int sgi_handler(void *data)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ sgi_data = *(sgi_data_t *) data;
+ sgi_handled[core_pos] = 1;
+ return 0;
+}
+
+/*
+ * Iterate over all cores and issue system suspend
+ * After returning from suspend, ensure that the core which entered
+ * suspend resumed from suspend.
+ *
+ * Each CPU runs the suspend/resume cycle, then powers on the next CPU in
+ * topology order with this same function as entry point, chaining the test
+ * across all cores one at a time.
+ */
+static test_result_t sys_suspend_from_all_cores(void)
+{
+ unsigned long long my_mpid = read_mpidr_el1() & MPID_MASK, target_mpid;
+ unsigned int core_pos = platform_get_core_pos(my_mpid);
+ int ret;
+ int psci_ret;
+
+ /* Increment the count of CPUs in the test */
+ cpu_ref_count++;
+ dsbsy();
+
+ /* Wait until this is the only CPU left ON, so SYSTEM_SUSPEND is legal */
+ while (!is_sys_suspend_state_ready())
+ ;
+
+ wakeup_irq_rcvd[core_pos] = 0;
+
+ /* Register timer handler */
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /* Program timer to fire after delay (individual status outputs not requested) */
+ ret = tftf_program_timer_and_sys_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ NULL, NULL);
+
+ /* Wait until the IRQ wake interrupt is received */
+ while (!wakeup_irq_rcvd[core_pos])
+ ;
+
+ if (ret) {
+ tftf_testcase_printf("Failed to program timer or suspend "
+ "system from core %x\n", core_pos);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Unregister timer handler */
+ tftf_timer_unregister_handler();
+ tftf_cancel_timer();
+
+ /* Done with the suspend test. Decrement count */
+ cpu_ref_count--;
+ dsbsy();
+
+ /* Hand the test over to the next CPU in topology order, if any */
+ test_target_node = tftf_topology_next_cpu(test_target_node);
+ if (test_target_node != PWR_DOMAIN_INIT) {
+ target_mpid = tftf_get_mpidr_from_node(test_target_node);
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) sys_suspend_from_all_cores,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d) \n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Wait for the target CPU to enter the test. The TFTF framework
+ * requires more than one CPU to be in the test to detect that the
+ * test has not finished.
+ */
+ while (!cpu_ref_count)
+ ;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Functionality test : Issue system suspend from all cores
+ * sequentially. This test ensures that system suspend can be issued
+ * from all cores and right core is resumed from system suspend.
+ *
+ * The lead CPU kicks off the chain by powering on the first CPU in
+ * topology order (or runs the chain itself if it happens to be first).
+ */
+test_result_t test_system_suspend_from_all_cores(void)
+{
+ unsigned long long target_mpid, my_mpid;
+ int psci_ret;
+
+ test_target_node = PWR_DOMAIN_INIT;
+ my_mpid = read_mpidr_el1() & MPID_MASK;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ /* NOTE(review): cpu_ready events are initialised but not waited on here */
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i)
+ tftf_init_event(&cpu_ready[i]);
+
+ test_target_node = tftf_topology_next_cpu(test_target_node);
+ assert(test_target_node != PWR_DOMAIN_INIT);
+
+ target_mpid = tftf_get_mpidr_from_node(test_target_node);
+ if (target_mpid == my_mpid)
+ return sys_suspend_from_all_cores();
+
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) sys_suspend_from_all_cores,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d) \n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Wait for the target CPU to enter the test. The TFTF framework
+ * requires more than one CPU to be in the test to detect that the
+ * test has not finished.
+ */
+ while (!cpu_ref_count)
+ ;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Helper function to issue SYSTEM SUSPEND SMC with custom parameters.
+ *
+ * entry_point_address: resume entry point passed to the EL3 firmware.
+ * context_id: opaque context value forwarded in the SMC.
+ *
+ * Returns the PSCI status code (ret0 of the SMC).
+ */
+int sys_suspend_helper(uintptr_t entry_point_address,
+ u_register_t context_id)
+{
+ smc_args args = {
+ SMC_PSCI_SYSTEM_SUSPEND,
+ (uintptr_t)entry_point_address,
+ (u_register_t)context_id
+ };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+
+ return ret_vals.ret0;
+}
+
+/*
+ * Function to issue system suspend with invalid entry-point on all cores
+ * sequentially.
+ *
+ * Each CPU issues SYSTEM_SUSPEND with entry point 0x1 (presumed not a
+ * valid non-secure entry address), expects PSCI_E_INVALID_ADDRESS, then
+ * chains the test to the next CPU in topology order.
+ */
+static test_result_t invalid_entrypoint_for_sys_suspend(void)
+{
+ unsigned long long target_mpid;
+ int psci_ret;
+
+ /* Increment the count of CPUs in the test */
+ cpu_ref_count++;
+ dsbsy();
+
+ /* Wait until this is the only CPU left ON */
+ while (!is_sys_suspend_state_ready())
+ ;
+
+ psci_ret = sys_suspend_helper((uintptr_t) 0x1, 0);
+ if (psci_ret != PSCI_E_INVALID_ADDRESS) {
+ tftf_testcase_printf("Test failed with invalid entry addr %x\n",
+ psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Done with the suspend test. Decrement count */
+ cpu_ref_count--;
+ dsbsy();
+
+ test_target_node = tftf_topology_next_cpu(test_target_node);
+ if (test_target_node != PWR_DOMAIN_INIT) {
+ target_mpid = tftf_get_mpidr_from_node(test_target_node);
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) invalid_entrypoint_for_sys_suspend,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d) \n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Wait for the target CPU to enter the test. The TFTF framework
+ * requires more than one CPU to be in the test to detect that the
+ * test has not finished.
+ */
+ while (!cpu_ref_count)
+ ;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ API test: Issue system suspend with invalid entrypoint on all
+ * cores. It should return error.
+ *
+ * The lead CPU kicks off the chain by powering on the first CPU in
+ * topology order (or runs the chain itself if it happens to be first).
+ */
+test_result_t test_system_suspend_invalid_entrypoint(void)
+{
+ unsigned long long target_mpid, my_mpid;
+ int psci_ret;
+
+ test_target_node = PWR_DOMAIN_INIT;
+ my_mpid = read_mpidr_el1() & MPID_MASK;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ /* NOTE(review): cpu_ready events are initialised but not waited on here */
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i)
+ tftf_init_event(&cpu_ready[i]);
+
+ test_target_node = tftf_topology_next_cpu(test_target_node);
+ assert(test_target_node != PWR_DOMAIN_INIT);
+
+ target_mpid = tftf_get_mpidr_from_node(test_target_node);
+ if (target_mpid == my_mpid)
+ return invalid_entrypoint_for_sys_suspend();
+
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) invalid_entrypoint_for_sys_suspend,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d) \n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Wait for the target CPU to enter the test. The TFTF framework
+ * requires more than one CPU to be in the test to detect that the
+ * test has not finished.
+ */
+ while (!cpu_ref_count)
+ ;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Function to test Non lead CPU response to SGIs after multiple invocations
+ * of system suspend.
+ *
+ * Runs on a non-lead CPU: registers and enables an SGI handler, signals
+ * readiness to the lead CPU, waits for the SGI, then acknowledges its
+ * reception through the sgi_received event.
+ */
+static test_result_t non_lead_cpu_sgi_test(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ const unsigned int sgi_id = IRQ_NS_SGI_0;
+ int sgi_ret;
+
+ /* Register the local IRQ handler for the SGI */
+ sgi_ret = tftf_irq_register_handler(sgi_id, sgi_handler);
+ if (sgi_ret != 0) {
+ tftf_testcase_printf("Failed to register IRQ %u (%d)",
+ sgi_id, sgi_ret);
+ return TEST_RESULT_FAIL;
+ }
+ /* Enable SGI */
+ tftf_irq_enable(sgi_id, GIC_HIGHEST_NS_PRIORITY);
+
+ /* Signal to the lead CPU that we are ready to receive SGI */
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ /* Wait for SGI */
+ while (sgi_handled[core_pos] == 0)
+ ;
+ /* Send event to indicate reception of SGI */
+ tftf_send_event(&sgi_received[core_pos]);
+
+ /* Unregister SGI handler */
+ tftf_irq_disable(sgi_id);
+ tftf_irq_unregister_handler(sgi_id);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Functionality test: Issue system suspend multiple times with
+ * all non-lead cores as OFF. This test ensures invoking system suspend
+ * multiple times on lead core does not have any issue.
+ * Steps:
+ * - Register timer wakeup event and issue system suspend multiple
+ * times. Ensure that the system suspend succeeds.
+ * - Turn on all the non lead CPU and send SGIs to them to ensure that
+ * all the non lead CPU are responsive.
+ */
+test_result_t test_psci_sys_susp_multiple_iteration(void)
+{
+ unsigned int target_mpid, target_node;
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(lead_mpid);
+ const unsigned int sgi_id = IRQ_NS_SGI_0;
+ int psci_ret;
+ int timer_ret;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i) {
+ tftf_init_event(&cpu_ready[i]);
+ tftf_init_event(&sgi_received[i]);
+ }
+
+ /* Register timer handler */
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /* Repeat TEST_ITERATION_COUNT suspend/resume cycles on the lead CPU */
+ for (unsigned int i = 0; i < TEST_ITERATION_COUNT; i++) {
+ wakeup_irq_rcvd[core_pos] = 0;
+
+ /*
+ * Program the wakeup timer, this will serve as the wake-up event
+ * to come out of suspend state, and issue system suspend
+ */
+ tftf_program_timer_and_sys_suspend(
+ PLAT_SUSPEND_ENTRY_TIME, &timer_ret, &psci_ret);
+
+ while (!wakeup_irq_rcvd[core_pos])
+ ;
+
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("System suspend failed with return value %i\n",
+ psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+ if (timer_ret) {
+ tftf_testcase_printf("Timer programming failed with return value %i\n",
+ timer_ret);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ tftf_cancel_timer();
+ /* Unregister timer handler */
+ tftf_timer_unregister_handler();
+
+ /* Turn on all cores after test to ensure all cores boot up*/
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ core_pos = platform_get_core_pos(target_mpid);
+
+ if (target_mpid == lead_mpid)
+ continue;
+
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) non_lead_cpu_sgi_test,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+ /* Wait until the CPU has registered its SGI handler */
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ /* Send SGI to all non lead CPUs and ensure that they receive it */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_send_sgi(sgi_id, core_pos);
+ tftf_wait_for_event(&sgi_received[core_pos]);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Functionality test : Issue system suspend with pending
+ * SGI on calling core. System suspend call should return prior to the
+ * programmed wake-up interval.
+ * Steps:
+ * - Mask the interrupts on lead CPU and send SGI to current CPU
+ * - Configure a wake-up timer and issue SYSTEM SUSPEND
+ * - Unmask the interrupt and verify that current CPU has woken
+ * prior to the wake-up timer firing.
+ */
+test_result_t test_psci_sys_susp_pending_irq(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ const unsigned int sgi_id = IRQ_NS_SGI_0;
+ int sgi_ret;
+ int psci_ret;
+ test_result_t ret = TEST_RESULT_SUCCESS;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Initialize variables */
+ sgi_handled[core_pos] = 0;
+ wakeup_irq_rcvd[core_pos] = 0;
+
+ /* Register the local IRQ handler for the SGI */
+ sgi_ret = tftf_irq_register_handler(sgi_id, sgi_handler);
+ if (sgi_ret != 0) {
+ tftf_testcase_printf("Failed to register IRQ %u (%d)",
+ sgi_id, sgi_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Register for timer interrupt */
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /*
+ * Program the MB timer, for 3 secs to fire timer interrupt if
+ * system enters suspend state with pending IRQ
+ */
+ tftf_program_timer(SUSPEND_TIME_3_SECS);
+
+ /* Mask IRQs so the SGI stays pending at the CPU interface */
+ tftf_irq_enable(sgi_id, GIC_HIGHEST_NS_PRIORITY);
+ disable_irq();
+
+ /* Send the SGI to the lead CPU */
+ tftf_send_sgi(sgi_id, core_pos);
+
+ /* Check if system enters suspend state with pending IRQ or not */
+ psci_ret = tftf_system_suspend();
+
+ /* Un-mask the interrupt */
+ enable_irq();
+
+ /*
+ * If the wake-up timer has fired, then the pending interrupt did
+ * not have any effect on the SYSTEM SUSPEND which means the
+ * test case failed.
+ *
+ */
+ if (wakeup_irq_rcvd[core_pos]) {
+ tftf_testcase_printf("Timer irq received\n");
+ ret = TEST_RESULT_FAIL;
+ }
+
+ /* Wait for the SGI to be handled */
+ while (sgi_handled[core_pos] == 0)
+ ;
+
+ /* Verify the sgi data received by the SGI handler */
+ if (sgi_data.irq_id != sgi_id) {
+ tftf_testcase_printf("Wrong IRQ ID, expected %u, got %u\n",
+ sgi_id, sgi_data.irq_id);
+ ret = TEST_RESULT_FAIL;
+ }
+
+ /* SYSTEM_SUSPEND itself must still report success */
+ if (psci_ret != PSCI_E_SUCCESS)
+ ret = TEST_RESULT_FAIL;
+
+ /* Unregister timer handler */
+ tftf_timer_unregister_handler();
+ tftf_cancel_timer();
+
+ /* Unregister SGI handler */
+ tftf_irq_disable(sgi_id);
+ tftf_irq_unregister_handler(sgi_id);
+
+ return ret;
+}
+
+/*
+ * Helper function to calculate checksum of a given DRAM area.
+ * Simple additive checksum over `size` bytes read as unsigned ints;
+ * assumes `size` is a multiple of sizeof(unsigned int) — the remainder
+ * bytes, if any, are ignored.
+ */
+unsigned long check_data_integrity(unsigned int *addr, unsigned int size)
+{
+ unsigned int chksum = 0;
+ unsigned int i;
+
+ for (i = 0; i < (size/sizeof(unsigned int)); i++)
+ chksum += *(addr + i);
+ return chksum;
+}
+
+/*
+ * @Test_Aim@ Functionality Test: Ensure that RAM contents are preserved on
+ * resume from system suspend
+ * Steps:
+ * - Write a known pattern to the DRAM and calculate the hash.
+ * - Configure wake-up timer and issue SYSTEM SUSPEND on lead CPU.
+ * - Recalculate the hash of the DRAM and compare it with the previous
+ * value. Both the hash values should match.
+ *
+ * Note: the "hash" is the additive checksum computed by
+ * check_data_integrity() over the TFTF read-only section.
+ */
+test_result_t test_psci_sys_susp_validate_ram(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned long prev_hash_val = 0;
+ unsigned long present_hash_val = 0;
+ int psci_ret;
+ int timer_ret;
+
+ test_result_t ret = TEST_RESULT_SUCCESS;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ wakeup_irq_rcvd[core_pos] = 0;
+
+ /* Check hash on known region of RAM before putting into suspend */
+ prev_hash_val = check_data_integrity((unsigned int *)TFTF_RO_START,
+ TFTF_RO_END - TFTF_RO_START);
+
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /*
+ * Program timer to fire interrupt after timer expires and issue
+ * system suspend
+ */
+ tftf_program_timer_and_sys_suspend(SUSPEND_TIME_10_SECS,
+ &timer_ret, &psci_ret);
+
+ while (!wakeup_irq_rcvd[core_pos])
+ ;
+ if (psci_ret == PSCI_E_SUCCESS) {
+ /*
+ * Check hash on known region of RAM after returning
+ * from suspend
+ */
+ present_hash_val = check_data_integrity(
+ (unsigned int *)TFTF_RO_START,
+ TFTF_RO_END - TFTF_RO_START);
+ if (present_hash_val != prev_hash_val) {
+ tftf_testcase_printf("ERROR: RAM data not retained \n");
+ ret = TEST_RESULT_FAIL;
+ }
+ } else {
+ tftf_testcase_printf("Failed: system suspend to RAM \n");
+ ret = TEST_RESULT_FAIL;
+ }
+
+ if (timer_ret) {
+ tftf_testcase_printf("Failed: timer programming \n");
+ ret = TEST_RESULT_FAIL;
+ }
+
+ /* Unregister timer handler */
+ tftf_timer_unregister_handler();
+ tftf_cancel_timer();
+
+ return ret;
+}
+
+/*
+ * Helper function to get deepest power state.
+ * Enumerates every composite power state the platform advertises and
+ * keeps the last valid one encountered.
+ * NOTE(review): this relies on the enumeration order ending at the
+ * deepest state — confirm against tftf_set_next_state_id_idx().
+ */
+static unsigned int get_deepest_power_state(void)
+{
+ unsigned int test_suspend_type;
+ unsigned int suspend_state_id;
+ unsigned int power_level;
+ unsigned int power_state = 0;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+ int ret;
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstate_id_idx);
+
+ /* Enumeration is exhausted when index 0 wraps to its initial value */
+ if (pstate_id_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* Skip index combinations that do not form a valid state */
+ ret = tftf_get_pstate_vars(&power_level,
+ &test_suspend_type,
+ &suspend_state_id,
+ pstate_id_idx);
+ if (ret)
+ continue;
+
+ power_state = tftf_make_psci_pstate(power_level,
+ test_suspend_type,
+ suspend_state_id);
+
+ } while (1);
+
+ return power_state;
+}
+
+/*
+ * Suspend non-lead cores
+ *
+ * Runs on a non-lead CPU: signals readiness to the lead CPU, then
+ * suspends itself to the deepest power state. It is woken later by an
+ * SGI from the lead CPU.
+ */
+static test_result_t suspend_non_lead_cpu(void)
+{
+ unsigned long long mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ int ret;
+
+ /* The SGI is the wake-up source for the suspended CPU */
+ tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+ /* Tell the lead CPU that the calling CPU is about to suspend itself */
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ ret = tftf_cpu_suspend(deepest_power_state);
+ tftf_irq_disable(IRQ_NS_SGI_0);
+
+ if (ret) {
+ ERROR(" CPU suspend failed with error %x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ API Test: Issue system suspend on a core while other
+ * cores are in suspend. This test ensures that system suspend will
+ * not be successful if cores other than core issuing suspend are not
+ * in OFF state.
+ * Steps:
+ * - Turn on non lead CPUs and suspend it to the deepest suspend
+ * power state.
+ * - Issue SYSTEM SUSPEND on primary CPU. The API should return
+ * error.
+ *
+ */
+test_result_t test_psci_sys_susp_with_cores_in_suspend(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned int target_mpid, target_node;
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ int psci_ret;
+ int timer_ret;
+ test_result_t ret = TEST_RESULT_SUCCESS;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(2);
+
+ for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; j++)
+ tftf_init_event(&cpu_ready[j]);
+
+ wakeup_irq_rcvd[core_pos] = 0;
+ deepest_power_state = get_deepest_power_state();
+
+ /* Suspend all cores other than lead core */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+
+ if (target_mpid == lead_mpid)
+ continue;
+
+ /* Turn on the non lead CPU and suspend it. */
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) suspend_non_lead_cpu,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Wait for all non-lead CPUs to be ready */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ /* Wait for 10 ms to ensure all the secondaries have suspended */
+ waitms(10);
+
+ /*
+ * Register and program timer, then issue a system suspend
+ * when other cores are in suspend state
+ */
+ tftf_timer_register_handler(suspend_wakeup_handler);
+ tftf_program_timer_and_sys_suspend(
+ PLAT_SUSPEND_ENTRY_TIME, &timer_ret, &psci_ret);
+
+ /* Wake all non-lead CPUs */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
+ }
+
+ /* Check return from value from system suspend API */
+ if (psci_ret != PSCI_E_DENIED) {
+ tftf_testcase_printf("Entered suspend with cores in suspend\n");
+ ret = TEST_RESULT_FAIL;
+ }
+ if (timer_ret) {
+ tftf_testcase_printf("Failed to program the timer\n");
+ ret = TEST_RESULT_FAIL;
+ }
+ /* Unregister and cancel timer */
+ tftf_timer_unregister_handler();
+ tftf_cancel_timer();
+
+ return ret;
+}
+
+/*
+ * This functions holds the CPU till a `waitq` event is received.
+ * Runs on a non-lead CPU: signals readiness to the lead CPU and then
+ * stays ON until the lead CPU releases it.
+ */
+static test_result_t cpu_waitq(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ /* Wait for event from primary cpu */
+ tftf_wait_for_event(&waitq[core_pos]);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ API TEST: Ensure that system suspend will not be successful
+ * if cores other than core issuing suspend are in running state
+ *
+ * Steps :
+ * - Turn on multiple cores on the non lead cluster
+ * - Issue SYSTEM SUSPEND. The API should return error.
+ */
+test_result_t test_psci_sys_susp_with_cores_on(void)
+{
+ unsigned int lead_cluster = MPIDR_CLUSTER_ID(read_mpidr_el1());
+ unsigned int core_pos;
+ unsigned int target_mpid, target_node;
+ int psci_ret;
+ int timer_ret;
+ test_result_t ret = TEST_RESULT_SUCCESS;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(2);
+
+ for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; j++) {
+ tftf_init_event(&waitq[j]);
+ tftf_init_event(&cpu_ready[j]);
+ wakeup_irq_rcvd[j] = 0;
+ }
+
+ /* Turn on cores in non-lead cluster */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+
+ if (MPIDR_CLUSTER_ID(target_mpid) == lead_cluster)
+ continue;
+
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) cpu_waitq,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power "
+ "on CPU 0x%x (%d)\n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ core_pos = platform_get_core_pos(target_mpid);
+ /* Ensure that the core has booted */
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ /* Register timer handler */
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /*
+ * Program timer to fire after delay and issue system suspend with
+ * other cores in ON state
+ */
+ tftf_program_timer_and_sys_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ &timer_ret, &psci_ret);
+
+ /* Send event to CPUs waiting for `waitq` event. */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+
+ /* Skip lead cluster */
+ if (MPIDR_CLUSTER_ID(target_mpid) == lead_cluster)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_send_event(&waitq[core_pos]);
+ }
+
+ /* Check return value from system suspend API */
+ if (psci_ret != PSCI_E_DENIED) {
+ tftf_testcase_printf("Test failed when suspending with return "
+ "value: %x \n", psci_ret);
+ ret = TEST_RESULT_FAIL;
+ }
+ if (timer_ret) {
+ tftf_testcase_printf("Test failed with return value when "
+ "programming the timer: %x \n", timer_ret);
+ ret = TEST_RESULT_FAIL;
+ }
+ tftf_timer_unregister_handler();
+ tftf_cancel_timer();
+ return ret;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/validate_power_state/test_validate_power_state.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/validate_power_state/test_validate_power_state.c
new file mode 100644
index 0000000..55de1fa
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/validate_power_state/test_validate_power_state.c
@@ -0,0 +1,581 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <events.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdlib.h>
+#include <tftf_lib.h>
+
+/* Per-CPU synchronisation events between lead and non-lead CPUs */
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+/* Per-CPU flags set by the SGI handler, polled by the tests */
+static volatile unsigned int sgi_received[PLATFORM_CORE_COUNT];
+
+/* Test body dispatched to CPUs (not referenced in the visible portion of this file) */
+static test_result_t (*psci_validate_test_function)(void);
+
+/*
+ * Sets sgi_received flag for indicating SGI is processed so that
+ * test can exit in a clean state
+ */
+static int validate_pstate_sgi_handler(void *data)
+{
+ unsigned int core_pos;
+
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ sgi_received[core_pos] = 1;
+ return 0;
+}
+
+/*
+ * Gets the next possible composite state ID's combination and creates
+ * a composite ID from 0 to max power levels in the system. It then calculates
+ * whether the calculated composite ID is valid or invalid and validates EL3
+ * firmware's return value.
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ */
+static test_result_t validate_el3_pstate_parsing(void)
+{
+ unsigned int j;
+ unsigned int test_suspend_type;
+ unsigned int suspend_state_id;
+ unsigned int power_level;
+ int psci_ret;
+ int expected_return_val;
+ unsigned int power_state;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+
+ /* Exercise the enumeration once per target power level */
+ for (j = 0; j <= PLAT_MAX_PWR_LEVEL; j++) {
+ do {
+ tftf_set_next_state_id_idx(j, pstate_id_idx);
+
+ /* Enumeration for this level is exhausted */
+ if (pstate_id_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* The expected PSCI status doubles as the validity verdict */
+ expected_return_val = tftf_get_pstate_vars(&power_level,
+ &test_suspend_type,
+ &suspend_state_id,
+ pstate_id_idx);
+ power_state = tftf_make_psci_pstate(power_level,
+ test_suspend_type,
+ suspend_state_id);
+
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (expected_return_val != psci_ret) {
+ tftf_testcase_printf("Failed with values: "
+ " psci_ret:%d"
+ " expected_return_val:%d"
+ " power_state:0x%x\n",
+ psci_ret,
+ expected_return_val,
+ power_state);
+ return TEST_RESULT_FAIL;
+ }
+ } while (1);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Creates a composite state ID of a single valid local level above level
+ * zero and tests the EL3 firmware's return value matches
+ * PSCI_E_INVALID_PARAMS.
+ *
+ * For level 0, both local and composite power state are same. Hence, it's
+ * skipped.
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ * TEST_RESULT_SKIPPED : If PLAT_MAX_PWR_LEVEL is < 1
+ */
+static test_result_t valid_only_local_stateid(void)
+{
+ unsigned int power_state;
+ int psci_ret;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+ const plat_state_prop_t *local_level_state;
+ unsigned int i;
+
+ /* If only single power level is possible, SKIP the test */
+ if (!PLAT_MAX_PWR_LEVEL) {
+ tftf_testcase_printf("Platform has only a single valid local level\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+
+ /*
+ * Start from power level 1, as local state for power level zero will
+ * be a valid composite id
+ */
+ for (i = 1; i <= PLAT_MAX_PWR_LEVEL; i++) {
+ do {
+
+ INFO("Getting next local state:\n");
+ tftf_set_next_local_state_id_idx(i, pstate_id_idx);
+
+ /* Enumeration for this level is exhausted */
+ if (pstate_id_idx[i] == PWR_STATE_INIT_INDEX)
+ break;
+ /* Shift the local state ID into the level-i field of the composite ID */
+ local_level_state = plat_get_state_prop(i) + pstate_id_idx[i];
+ power_state = tftf_make_psci_pstate(i,
+ local_level_state->is_pwrdown,
+ local_level_state->state_ID << PLAT_LOCAL_PSTATE_WIDTH);
+
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (psci_ret != PSCI_E_INVALID_PARAMS) {
+ tftf_testcase_printf("Expected invalid params but got :"
+ " psci_ret: %d"
+ " power_state:0x%x\n",
+ psci_ret,
+ power_state);
+
+ return TEST_RESULT_FAIL;
+ }
+ } while (pstate_id_idx[i] != PWR_STATE_INIT_INDEX);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Create a composite state ID of invalid state ID's at all levels and
+ * tests the EL3 firmware's return value matches PSCI_E_INVALID_PARAMS
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ */
+static test_result_t completely_invalid_stateid(void)
+{
+ unsigned int state_id;
+ int i;
+ unsigned int power_state;
+ int psci_ret;
+
+ state_id = 0;
+
+ /* Make stateID with all invalid ID's for all power levels */
+ for (i = 0; i < PLAT_MAX_PWR_LEVEL; i++)
+ state_id = state_id |
+ ((PLAT_PSCI_DUMMY_STATE_ID & ((1 << PLAT_LOCAL_PSTATE_WIDTH) - 1))
+ << (PLAT_LOCAL_PSTATE_WIDTH * i));
+
+ power_state = tftf_make_psci_pstate(PLAT_MAX_PWR_LEVEL, PSTATE_TYPE_POWERDOWN, state_id);
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ /* The firmware must reject the all-invalid composite state */
+ if (psci_ret != PSCI_E_INVALID_PARAMS) {
+ tftf_testcase_printf("Expected invalid params but got : %d"
+ " power_state:0x%x\n",
+ psci_ret,
+ power_state);
+
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Creates a composite power state with invalid state type and tests
+ * the EL3 firmware's return value matches PSCI_E_INVALID_PARAMS
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ */
+static test_result_t invalid_state_type(void)
+{
+ unsigned int test_suspend_type;
+ unsigned int suspend_state_id;
+ unsigned int power_level;
+ int psci_ret;
+ int expected_return_val;
+ unsigned int power_state;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstate_id_idx);
+
+ /* Enumeration is exhausted */
+ if (pstate_id_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ expected_return_val = tftf_get_pstate_vars(&power_level,
+ &test_suspend_type,
+ &suspend_state_id,
+ pstate_id_idx);
+
+ /* Only corrupt states that are otherwise valid */
+ if (expected_return_val != PSCI_E_SUCCESS)
+ continue;
+
+ /* Reverse the suspend type */
+ power_state = tftf_make_psci_pstate(power_level, !test_suspend_type, suspend_state_id);
+
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (PSCI_E_INVALID_PARAMS != psci_ret) {
+ tftf_testcase_printf("Failed with values:"
+ " psci_ret:%d"
+ " power_state:0x%x\n",
+ psci_ret,
+ power_state);
+ return TEST_RESULT_FAIL;
+ }
+ } while (1);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Creates a composite power state with valid local state but invalid
+ * power level and tests the EL3 firmware's return value matches
+ * PSCI_E_INVALID_PARAMS
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ * TEST_RESULT_SKIPPED : If EL3 firmware supports extended state ID
+ */
+static test_result_t invalid_power_level(void)
+{
+ unsigned int test_suspend_type;
+ unsigned int suspend_state_id;
+ unsigned int power_level;
+ int psci_ret;
+ int expected_return_val;
+ unsigned int power_state;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+
+ /* Skip the test if EL3 firmware code supports extended state ID */
+ if (!tftf_is_psci_pstate_format_original())
+ return TEST_RESULT_SKIPPED;
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstate_id_idx);
+
+ /* Enumeration is exhausted */
+ if (pstate_id_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ expected_return_val = tftf_get_pstate_vars(&power_level,
+ &test_suspend_type,
+ &suspend_state_id,
+ pstate_id_idx);
+
+ /* Only corrupt states that are otherwise valid */
+ if (expected_return_val != PSCI_E_SUCCESS)
+ continue;
+
+ /* Make a power state with invalid power level */
+ power_state = tftf_make_psci_pstate(power_level + 1,
+ test_suspend_type,
+ suspend_state_id);
+
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (PSCI_E_INVALID_PARAMS != psci_ret) {
+ tftf_testcase_printf("Failed with values:"
+ " psci_ret:%d"
+ " power_state:0x%x\n",
+ psci_ret,
+ power_state);
+ return TEST_RESULT_FAIL;
+ }
+ } while (1);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Creates a composite state ID of valid local state at some levels
+ * and invalid state ID at others and tests the EL3 firmware's return
+ * value matches PSCI_E_INVALID_PARAMS
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ * TEST_RESULT_SKIPPED : If PLAT_MAX_PWR_LEVEL is < 1
+ */
+static test_result_t mixed_state_id(void)
+{
+	unsigned int test_suspend_type;
+	unsigned int suspend_state_id;
+	unsigned int power_level;
+	int psci_ret;
+	unsigned int power_state;
+	unsigned int j;
+	unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+	unsigned int invalid_id_set;
+
+	/*
+	 * Platform contains only one power level and hence we cannot have
+	 * both valid and invalid local state
+	 */
+	if (!PLAT_MAX_PWR_LEVEL)
+		return TEST_RESULT_SKIPPED;
+
+	INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+
+	do {
+		/* Advance to the next combination of local state indices */
+		tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstate_id_idx);
+
+		/* All combinations have been exhausted */
+		if (pstate_id_idx[0] == PWR_STATE_INIT_INDEX)
+			break;
+
+		if (tftf_get_pstate_vars(&power_level,
+					&test_suspend_type,
+					&suspend_state_id,
+					pstate_id_idx) != PSCI_E_SUCCESS)
+			continue;
+
+		invalid_id_set = 0;
+
+		/*
+		 * Generate a state ID with valid and invalid local state ID's at
+		 * different levels
+		 */
+		for (j = 0; j <= power_level; j++) {
+			/*
+			 * Corrupt the local state ID at this level with 50%
+			 * probability (independent coin toss per level).
+			 */
+			if (rand() % 2) {
+				suspend_state_id = suspend_state_id |
+					((PLAT_PSCI_DUMMY_STATE_ID &
+					((1 << PLAT_LOCAL_PSTATE_WIDTH) - 1))
+					<< (PLAT_LOCAL_PSTATE_WIDTH * j));
+				invalid_id_set = 1;
+			}
+		}
+
+		/*
+		 * Overwrite state ID for a random level if none of the
+		 * levels are invalid
+		 */
+		if (!invalid_id_set) {
+			j = rand() % (power_level + 1);
+			suspend_state_id = suspend_state_id |
+				((PLAT_PSCI_DUMMY_STATE_ID &
+				((1 << PLAT_LOCAL_PSTATE_WIDTH) - 1))
+				<< (PLAT_LOCAL_PSTATE_WIDTH * j));
+		}
+
+		power_state = tftf_make_psci_pstate(power_level, test_suspend_type, suspend_state_id);
+		psci_ret = tftf_cpu_suspend(power_state);
+
+		/* EL3 must reject the partially-invalid composite state */
+		if (psci_ret != PSCI_E_INVALID_PARAMS) {
+			tftf_testcase_printf("Failed with values: power_level: %d"
+					" test_suspend_type: %d"
+					" suspend_state_id:%d"
+					" psci_ret:%d"
+					" power_state:0x%x\n",
+					power_level,
+					test_suspend_type,
+					suspend_state_id,
+					psci_ret,
+					power_state);
+			return TEST_RESULT_FAIL;
+		}
+	} while (1);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+
+/*
+ * This function contains common code for all test cases and runs the testcase
+ * specific code.
+ *
+ * Returns the return value of test case specific code
+ */
+static test_result_t test_execute_test_function(void)
+{
+	test_result_t ret;
+	unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+	/*
+	 * The handler is expected to set sgi_received[core_pos] when the SGI
+	 * is finally taken (see the wait loop below).
+	 */
+	tftf_irq_register_handler(IRQ_NS_SGI_0, validate_pstate_sgi_handler);
+	tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+	/*
+	 * Mask IRQ to prevent the interrupt handler being invoked
+	 * and clearing the interrupt. A pending interrupt will cause this
+	 * CPU to wake-up from suspend.
+	 */
+	disable_irq();
+
+	/* Configure an SGI to wake-up from suspend */
+	tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
+
+	/* Run the testcase-specific suspend validation code */
+	ret = (*psci_validate_test_function)();
+
+	/* Unmask IRQs so the pending SGI can now be taken and handled */
+	enable_irq();
+
+	/* Wait until the handler has actually observed the SGI */
+	while (!sgi_received[core_pos])
+		;
+
+	tftf_irq_disable(IRQ_NS_SGI_0);
+	tftf_irq_unregister_handler(IRQ_NS_SGI_0);
+
+	return ret;
+}
+
+/*
+ * Non-lead CPU entry point function for all PSCI PSTATE validation functions.
+ *
+ * Returns the return value of test case specific code
+ */
+static test_result_t test_non_lead_cpu_validate_ep(void)
+{
+	unsigned int pos;
+
+	/* Signal to the lead CPU that this CPU is ready to validate */
+	pos = platform_get_core_pos(read_mpidr_el1());
+	tftf_send_event(&cpu_ready[pos]);
+
+	/* Run the common test body */
+	return test_execute_test_function();
+}
+
+/*
+ * Lead CPU entry point function for all PSCI PSTATE validation functions. It
+ * powers on all secondaries and executes the test cases specific code.
+ *
+ * Returns the return value of test case specific code or SKIPPED in case
+ * if it is unable to power on a core or EL3 firmware only supports NULL
+ * stateID.
+ */
+static test_result_t test_lead_cpu_validate_ep(void)
+{
+	unsigned int core_pos;
+	unsigned long lead_mpid;
+	unsigned long target_mpid;
+	unsigned long cpu_node;
+	/*
+	 * tftf_cpu_on() returns a PSCI status code, not a test_result_t;
+	 * keep it in a plain int instead of conflating the two enums.
+	 */
+	int psci_ret;
+	int i;
+
+	if (tftf_is_psci_state_id_null()) {
+		tftf_testcase_printf("EL3 firmware supports only NULL stateID\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Initialise cpu_ready event variable */
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+		tftf_init_event(&cpu_ready[i]);
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	/*
+	 * Preparation step: Power on all cores.
+	 */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already on */
+		if (target_mpid == lead_mpid)
+			continue;
+
+		psci_ret = tftf_cpu_on(target_mpid,
+				(uintptr_t) test_non_lead_cpu_validate_ep,
+				0);
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf(
+				"Failed to power on CPU 0x%x (%d)\n",
+				(unsigned int)target_mpid, psci_ret);
+
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for all non-lead CPUs to be ready */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (target_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(target_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+	}
+
+	/* Call this to execute the test case specific code */
+	return test_execute_test_function();
+}
+
+/*
+ * Creates all possible valid local state ID's at all levels and tests
+ * the EL3 firmware's return value matches the expected one.
+ */
+test_result_t test_psci_validate_pstate(void)
+{
+	/* Select the testcase body, then run the common entry point */
+	psci_validate_test_function = validate_el3_pstate_parsing;
+	return test_lead_cpu_validate_ep();
+}
+
+/*
+ * Creates a composite state ID of a single valid local level and
+ * tests the EL3 firmware's return value matches the expected one.
+ */
+test_result_t test_psci_valid_local_pstate(void)
+{
+	/* Select the testcase body, then run the common entry point */
+	psci_validate_test_function = valid_only_local_stateid;
+	return test_lead_cpu_validate_ep();
+}
+
+/*
+ * Create a composite state ID of invalid state ID's at all levels
+ * and tests the EL3 firmware's return value matches the expected
+ * one.
+ */
+test_result_t test_psci_invalid_stateID(void)
+{
+	/* Select the testcase body, then run the common entry point */
+	psci_validate_test_function = completely_invalid_stateid;
+	return test_lead_cpu_validate_ep();
+}
+
+/*
+ * Creates a composite state ID of invalid state type and tests the
+ * EL3 firmware's return value matches the expected one.
+ */
+test_result_t test_psci_invalid_state_type(void)
+{
+	/* Select the testcase body, then run the common entry point */
+	psci_validate_test_function = invalid_state_type;
+	return test_lead_cpu_validate_ep();
+}
+
+/*
+ * Creates a composite state ID of invalid power level in original
+ * state format and tests the EL3 firmware's return value matches the
+ * expected value.
+ */
+test_result_t test_psci_invalid_power_level(void)
+{
+	/* Select the testcase body, then run the common entry point */
+	psci_validate_test_function = invalid_power_level;
+	return test_lead_cpu_validate_ep();
+}
+
+/*
+ * Creates a composite state ID of valid local state at some levels
+ * and invalid state ID at others and tests the EL3 firmware's return
+ * value matches the expected value
+ */
+test_result_t test_psci_mixed_state_id(void)
+{
+	/* Select the testcase body, then run the common entry point */
+	psci_validate_test_function = mixed_state_id;
+	return test_lead_cpu_validate_ep();
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_hotplug_stress.c b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_hotplug_stress.c
new file mode 100644
index 0000000..3ac4bc5
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_hotplug_stress.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+#define STRESS_TEST_COUNT 1000
+
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+static event_t cluster_booted;
+
+/* Return success depicting CPU booted successfully */
+static test_result_t test_cpu_booted(void)
+{
+	unsigned int pos;
+
+	/* Signal to the lead CPU that the calling CPU entered the test */
+	pos = platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+	tftf_send_event(&cpu_booted[pos]);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/* Return success depicting all cores in a cluster booted successfully */
+static test_result_t test_cluster_booted(void)
+{
+	unsigned int pos;
+
+	/* Signal to the lead CPU that the calling CPU entered the test */
+	pos = platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+	tftf_send_event(&cpu_booted[pos]);
+
+	/* Hold here until the lead CPU releases the whole cluster */
+	tftf_wait_for_event(&cluster_booted);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Random hotplug cores in a large iteration to stress boot path code
+ * Test Do :
+ * 1) Power up a random core
+ * 2) Ensure this core has booted successfully to TFTF
+ * 3) Wait for core to be powered off by the framework.
+ * 4) Repeat 1-2-3 STRESS_TEST_COUNT times
+ * 5) The test is aborted straight away if any failure occurs. In this case,
+ * the test is declared as failed.
+ * Note: It will be skipped on single-core platforms.
+ */
+test_result_t psci_hotplug_single_core_stress_test(void)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int cpu;
+	unsigned int core_pos;
+	int psci_ret;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	NOTICE("Power on and off any random core %d times\n",
+		STRESS_TEST_COUNT);
+
+	for (unsigned int i = 0; i < STRESS_TEST_COUNT; ++i) {
+		/* Reset/Initialise the event variable */
+		for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; ++j)
+			tftf_init_event(&cpu_booted[j]);
+
+		/*
+		 * Find a random CPU to power up and power down
+		 */
+		cpu = tftf_find_random_cpu_other_than(lead_mpid);
+		/*
+		 * NOTE(review): assert() is used but <assert.h> is not
+		 * directly included by this file - confirm it is pulled
+		 * in transitively.
+		 */
+		assert(cpu != lead_mpid);
+
+		psci_ret = tftf_cpu_on(cpu,
+				(uintptr_t) test_cpu_booted,
+				0);
+		if (psci_ret != PSCI_E_SUCCESS)
+			return TEST_RESULT_FAIL;
+
+		/* Block until the target CPU signals it entered the test */
+		core_pos = platform_get_core_pos(cpu);
+		tftf_wait_for_event(&cpu_booted[core_pos]);
+
+		/*
+		 * Wait for the CPU to be powered off by framework before issuing a
+		 * CPU_ON to it
+		 */
+		while (tftf_is_cpu_online(cpu))
+			;
+	}
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Repeated cores hotplug as stress test
+ * Test Do :
+ * 1) Power up all the cores
+ * 2) Ensure all the cores have booted successfully to TFTF
+ * 3) Wait for all the cores to be powered off by the framework.
+ * 4) Repeat 1-2-3 STRESS_TEST_COUNT times
+ * 5) The test is aborted straight away if any failure occurs. In this case,
+ * the test is declared as failed.
+ * Note: It will be skipped on single-core platforms.
+ */
+test_result_t psci_hotplug_stress_test(void)
+{
+	unsigned int lead_cpu = read_mpidr_el1() & MPID_MASK;
+	unsigned int mpid, node, pos;
+	int rc;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	NOTICE("This multi-core test will repeat %d times\n",
+		STRESS_TEST_COUNT);
+
+	for (unsigned int iter = 0; iter < STRESS_TEST_COUNT; iter++) {
+		/* Start each round with fresh boot events */
+		for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; ++j)
+			tftf_init_event(&cpu_booted[j]);
+
+		/* Phase 1: power on every secondary core */
+		for_each_cpu(node) {
+			mpid = tftf_get_mpidr_from_node(node);
+			/* The lead CPU is already running */
+			if (mpid == lead_cpu)
+				continue;
+
+			rc = tftf_cpu_on(mpid,
+					(uintptr_t) test_cpu_booted,
+					0);
+			if (rc != PSCI_E_SUCCESS)
+				return TEST_RESULT_FAIL;
+		}
+
+		/* Phase 2: confirm every secondary reached the test */
+		for_each_cpu(node) {
+			mpid = tftf_get_mpidr_from_node(node);
+			if (mpid == lead_cpu)
+				continue;
+
+			pos = platform_get_core_pos(mpid);
+			tftf_wait_for_event(&cpu_booted[pos]);
+		}
+
+		/* Phase 3: wait for the framework to power them all off */
+		for_each_cpu(node) {
+			mpid = tftf_get_mpidr_from_node(node);
+			if (mpid == lead_cpu)
+				continue;
+
+			while (tftf_is_cpu_online(mpid))
+				;
+		}
+	}
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Stress test cluster hotplug
+ * Test Do :
+ * 1) Power up all the cores of non-lead cluster
+ * 2) Ensure all the cores have booted successfully to TFTF
+ * 3) Wait for all the cores to be powered off by the framework.
+ * 4) Repeat 1-2-3 STRESS_TEST_COUNT times
+ * 5) The test is aborted straight away if any failure occurs. In this case,
+ * the test is declared as failed.
+ * Note: It will be skipped on single-cluster platforms.
+ */
+test_result_t psci_cluster_hotplug_stress_test(void)
+{
+	unsigned int lead_cluster = MPIDR_CLUSTER_ID(read_mpidr_el1());
+	unsigned int mpid, node, pos;
+	int rc;
+
+	SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(2);
+
+	NOTICE("This Cluster hotplug test will repeat %d times\n",
+		STRESS_TEST_COUNT);
+
+	for (unsigned int iter = 0; iter < STRESS_TEST_COUNT; iter++) {
+		/* Fresh synchronisation events for this iteration */
+		tftf_init_event(&cluster_booted);
+		for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; ++j)
+			tftf_init_event(&cpu_booted[j]);
+
+		/* Power on every core outside the lead cluster */
+		for_each_cpu(node) {
+			mpid = tftf_get_mpidr_from_node(node);
+			if (MPIDR_CLUSTER_ID(mpid) == lead_cluster)
+				continue;
+
+			rc = tftf_cpu_on(mpid,
+					(uintptr_t) test_cluster_booted,
+					0);
+			if (rc != PSCI_E_SUCCESS)
+				return TEST_RESULT_FAIL;
+		}
+
+		/* Confirm every non-lead-cluster core entered the test */
+		for_each_cpu(node) {
+			mpid = tftf_get_mpidr_from_node(node);
+			if (MPIDR_CLUSTER_ID(mpid) == lead_cluster)
+				continue;
+
+			pos = platform_get_core_pos(mpid);
+			tftf_wait_for_event(&cpu_booted[pos]);
+		}
+
+		/*
+		 * All cores have booted; release them so they return to the
+		 * framework and get powered off.
+		 */
+		tftf_send_event_to_all(&cluster_booted);
+
+		/*
+		 * Wait for each of them to be fully off before the next
+		 * iteration issues CPU_ON again.
+		 */
+		for_each_cpu(node) {
+			mpid = tftf_get_mpidr_from_node(node);
+			if (MPIDR_CLUSTER_ID(mpid) == lead_cluster)
+				continue;
+
+			while (tftf_is_cpu_online(mpid))
+				;
+		}
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c
new file mode 100644
index 0000000..99f6854
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+#define STRESS_TEST_COUNT 100
+
+/* Per-CPU counters used for the coherency test */
+typedef struct cpu_pm_ops_desc {
+	spinlock_t lock;
+	unsigned int pcpu_count[PLATFORM_CORE_COUNT];
+} cpu_pm_ops_desc_t;
+
+/* Copy of the counters placed in coherent device memory */
+static cpu_pm_ops_desc_t device_pm_ops_desc
+	__attribute__ ((section("tftf_coherent_mem")));
+/* Copy of the counters placed in normal (cacheable) memory */
+static cpu_pm_ops_desc_t normal_pm_ops_desc;
+
+/* Per-CPU events used to confirm each CPU entered a test */
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+/* Start flag polled by the secondaries in the CPU ON race test */
+static volatile unsigned int start_test;
+/* Set by the coordinating CPU to make the others leave their loops */
+static unsigned int exit_test;
+/* Composite power state used for the CPU suspend calls */
+static unsigned int power_state;
+/* The target for CPU ON requests */
+static volatile unsigned long long target_mpid;
+
+/* Protects cpu_on_count */
+static spinlock_t counter_lock;
+/* Number of times a CPU has (re-)entered the ON/OFF stress function */
+static volatile unsigned int cpu_on_count;
+/* Whether CPU suspend calls should be thrown into the test */
+static unsigned int include_cpu_suspend;
+
+/* Forward declaration: warm-boot entry point for the CPU ON race test */
+static test_result_t secondary_cpu_on_race_test(void);
+
+/*
+ * Utility function to wait for all CPUs other than the caller to be
+ * OFF.
+ */
+static void wait_for_non_lead_cpus(void)
+{
+	unsigned int caller_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int mpid, node;
+
+	for_each_cpu(node) {
+		mpid = tftf_get_mpidr_from_node(node);
+		/* The calling CPU is, by definition, still on */
+		if (mpid == caller_mpid)
+			continue;
+
+		/* Poll affinity info until this CPU reports OFF */
+		while (tftf_psci_affinity_info(mpid, MPIDR_AFFLVL0)
+				!= PSCI_STATE_OFF)
+			;
+	}
+}
+
+/*
+ * Update per-cpu counter corresponding to the current CPU.
+ * This function updates 2 counters, one in normal memory and the other
+ * in coherent device memory. The counts are then compared to check if they
+ * match. This verifies that the caches and the interconnect are coherent
+ * during the test.
+ * Returns -1 on error, 0 on success.
+ */
+static int update_counters(void)
+{
+	unsigned int normal_count, device_count;
+	unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+	/*
+	 * Ensure that the copies of the counters in device and normal memory
+	 * match. The locks and the data should become incoherent if any cluster
+	 * is not taking part in coherency.
+	 */
+	spin_lock(&normal_pm_ops_desc.lock);
+	normal_count = normal_pm_ops_desc.pcpu_count[core_pos];
+	spin_unlock(&normal_pm_ops_desc.lock);
+
+	spin_lock(&device_pm_ops_desc.lock);
+	device_count = device_pm_ops_desc.pcpu_count[core_pos];
+	spin_unlock(&device_pm_ops_desc.lock);
+
+	/* A mismatch indicates a cache/interconnect coherency failure */
+	if (device_count != normal_count) {
+		tftf_testcase_printf("Count mismatch. Device memory count ="
+				" %u: normal memory count = %u\n",
+				device_count, normal_count);
+		return -1;
+	}
+
+	/* Increment the count in both copies of the counter */
+	spin_lock(&normal_pm_ops_desc.lock);
+	normal_pm_ops_desc.pcpu_count[core_pos]++;
+	spin_unlock(&normal_pm_ops_desc.lock);
+
+	spin_lock(&device_pm_ops_desc.lock);
+	device_pm_ops_desc.pcpu_count[core_pos]++;
+	spin_unlock(&device_pm_ops_desc.lock);
+
+	return 0;
+}
+
+/*
+ * The test loop for non lead CPUs in psci_on_off_suspend_coherency_test. It
+ * updates the counters and depending on the value of the random variable `op`,
+ * the secondaries either offlines (by returning back to Test framework) or
+ * suspends itself.
+ */
+static test_result_t random_suspend_off_loop(void)
+{
+	/*
+	 * Possible actions for an iteration. A scoped enum replaces the
+	 * original function-local #defines, which leaked into the rest of
+	 * the translation unit (they were never #undef'd).
+	 */
+	enum { SUSPEND_CORE = 0, OFFLINE_CORE = 1 };
+	int rc, op;
+
+	while (!exit_test) {
+		/* Bump both per-CPU counters and verify they agree */
+		rc = update_counters();
+		if (rc)
+			return TEST_RESULT_FAIL;
+
+		/* Pick the next action at random: suspend or power off */
+		op = rand() % 2;
+
+		/*
+		 * If the chosen action is to power off, then return from the
+		 * test function so that the test framework powers this CPU off.
+		 */
+		if (op == OFFLINE_CORE)
+			return TEST_RESULT_SUCCESS;
+
+		/* Program timer for wake-up event. */
+		rc = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+				power_state, NULL, NULL);
+
+		tftf_cancel_timer();
+
+		if (rc != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("CPU timer/suspend returned error"
+					" 0x%x\n", rc);
+			return TEST_RESULT_FAIL;
+		}
+	}
+	return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t lead_cpu_main(unsigned long long mpid)
+{
+	/*
+	 * update_counters() returns -1 on failure and the PSCI calls below
+	 * return negative error codes, so use a signed int rather than the
+	 * original unsigned int to avoid signed/unsigned comparison
+	 * surprises.
+	 */
+	int rc;
+	unsigned long long rand_mpid;
+	int i;
+
+	/* The lead cpu will not be turned off. */
+	for (i = STRESS_TEST_COUNT; i >= 0; i--) {
+		rc = update_counters();
+		if (rc)
+			return TEST_RESULT_FAIL;
+
+		/* Program timer for wake-up event. */
+		rc = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+				power_state, NULL, NULL);
+
+		tftf_cancel_timer();
+
+		if (rc != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("CPU timer/suspend returned error"
+					" 0x%x\n", rc);
+			return TEST_RESULT_FAIL;
+		}
+
+		/*
+		 * The lead cpu's woken up since the system timer has fired.
+		 * For any cpus which have turned themselves off, generate a
+		 * random MPIDR and try turning on the corresponding cpu.
+		 */
+		do {
+			rand_mpid = tftf_find_random_cpu_other_than(mpid);
+		} while (tftf_psci_affinity_info(rand_mpid, MPIDR_AFFLVL0)
+				!= PSCI_STATE_OFF);
+
+		/*
+		 * The target races with other wake-ups, so any of these
+		 * return codes is treated as acceptable here.
+		 */
+		rc = tftf_try_cpu_on(rand_mpid,
+				(uintptr_t) random_suspend_off_loop,
+				0);
+		if ((rc != PSCI_E_ALREADY_ON) &&
+		    (rc != PSCI_E_ON_PENDING) &&
+		    (rc != PSCI_E_SUCCESS) &&
+		    (rc != PSCI_E_INVALID_PARAMS)) {
+			tftf_testcase_printf("CPU ON failed with error ="
+					" 0x%x\n", rc);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	exit_test = 1;
+	/* Ensure update to `exit_test` is seen by all cores prior to
+	   invoking wait_for_non_lead_cpus() */
+	dmbsy();
+
+	wait_for_non_lead_cpus();
+
+	INFO("Exiting test\n");
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Send event depicting CPU booted successfully and then invoke
+ * random_suspend_off_loop.
+ */
+static test_result_t non_lead_random_suspend_off_loop(void)
+{
+	unsigned int pos;
+
+	/* Report to the lead CPU that this CPU is participating */
+	pos = platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+	tftf_send_event(&cpu_booted[pos]);
+
+	/* Then join the common random suspend/off loop */
+	return random_suspend_off_loop();
+}
+
+/*
+ * @Test_Aim@ Repeated cores hotplug as stress test
+ * Test Do :
+ * 1) Power up all the cores
+ * 2) Ensure all the cores have booted successfully to TFTF
+ * 3) Randomly suspend or turn OFF secondary CPU
+ * 4) The lead CPU will suspend and turn ON a random CPU which has powered OFF.
+ * 5) Repeat 1-4 STRESS_TEST_COUNT times
+ * 6) The test is aborted straight away if any failure occurs. In this case,
+ * the test is declared as failed.
+ * Note: The test will be skipped on single-core platforms.
+ */
+test_result_t psci_on_off_suspend_coherency_test(void)
+{
+	unsigned int cpu_node, core_pos;
+	unsigned long long cpu_mpid, lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int counter_lo, stateid;
+	int psci_ret;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	/* Reinitialize the event variable */
+	for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; ++j)
+		tftf_init_event(&cpu_booted[j]);
+
+	init_spinlock(&normal_pm_ops_desc.lock);
+	init_spinlock(&device_pm_ops_desc.lock);
+
+	exit_test = 0;
+
+	/* Seed the random number generator */
+	counter_lo = (unsigned int) read_cntpct_el0();
+	srand(counter_lo);
+
+	psci_ret = tftf_psci_make_composite_state_id(PLAT_MAX_PWR_LEVEL,
+			PSTATE_TYPE_POWERDOWN, &stateid);
+	if (psci_ret != PSCI_E_SUCCESS) {
+		tftf_testcase_printf("Failed to construct composite state\n");
+		return TEST_RESULT_SKIPPED;
+	}
+	/*
+	 * NOTE(review): the state ID is composed for PLAT_MAX_PWR_LEVEL but
+	 * the power state targets MPIDR_AFFLVL0 - confirm this combination
+	 * is intentional.
+	 */
+	power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+			PSTATE_TYPE_POWERDOWN, stateid);
+
+	/* Turn on all the non-lead CPUs */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU, it is already powered on */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		psci_ret = tftf_cpu_on(cpu_mpid,
+			(uintptr_t) non_lead_random_suspend_off_loop, 0);
+		if (psci_ret != PSCI_E_SUCCESS)
+			return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Confirm the non-lead cpus booted and participated in the test
+	 */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU, it is already powered on */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_booted[core_pos]);
+	}
+
+	/*
+	 * Run the lead CPU's suspend/CPU-ON driver loop. (A dead store to
+	 * core_pos that preceded this call has been removed.)
+	 */
+	return lead_cpu_main(lead_mpid);
+}
+
+/*
+ * Frantically send CPU ON requests to the target mpidr till it returns
+ * ALREADY_ON.
+ * Return 0 on success, 1 on error.
+ */
+static int test_cpu_on_race(void)
+{
+	int rc;
+
+	/* Hammer the target with CPU ON requests until it reports ON */
+	for (;;) {
+		rc = tftf_try_cpu_on(target_mpid,
+				(uintptr_t) secondary_cpu_on_race_test, 0);
+		if (rc == PSCI_E_ALREADY_ON)
+			return 0;
+
+		/* Only SUCCESS and ON_PENDING are acceptable in the race */
+		if (rc != PSCI_E_SUCCESS && rc != PSCI_E_ON_PENDING) {
+			tftf_testcase_printf("Unexpected return value 0x%x"
+					" from PSCI CPU ON\n", rc);
+			return 1;
+		}
+	}
+}
+
+/*
+ * This function runs the test_cpu_on_race() till either `exit_test`
+ * is set or the `target_mpid` is the current mpid.
+ */
+static test_result_t secondary_cpu_on_race_test(void)
+{
+	unsigned long long mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(mpid);
+	int ret;
+
+	/* Tell the lead CPU that the calling CPU has entered the test */
+	tftf_send_event(&cpu_booted[core_pos]);
+
+	/* Wait for start flag */
+	while (start_test == 0)
+		;
+
+	do {
+		/*
+		 * If the current CPU is the target mpid, then power OFF.
+		 * The target mpid will be target for CPU ON requests by other
+		 * cores.
+		 */
+		if (mpid == target_mpid)
+			return TEST_RESULT_SUCCESS;
+
+		/* Otherwise join the race to turn the target back on */
+		ret = test_cpu_on_race();
+		if (ret)
+			return TEST_RESULT_FAIL;
+	} while (exit_test == 0);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Verify that the CPU ON race conditions are handled in Firmware.
+ * Test Do :
+ * 1. Designate a target CPU to power down.
+ * 2. Boot up all the CPUs on the system
+ * 3. If the CPU is the designated target CPU, power OFF
+ * 4. All the other cores issue CPU ON to the target CPU continuously.
+ * As per PSCI specification, only one CPU ON call will succeed in sending
+ * the ON command to the target CPU.
+ * 5. The Target CPU should turn ON and execute successfully.
+ * 6. The test should iterate again with another CPU as the target CPU this
+ * time.
+ * 7. Repeat Steps 1-6 for STRESS_TEST_COUNT times.
+ * Note: The test will be skipped on single-core platforms.
+ */
+test_result_t psci_verify_cpu_on_race(void)
+{
+	unsigned int cpu_node, core_pos, target_node, j;
+	unsigned long long lead_mpid = read_mpidr_el1() & MPID_MASK, cpu_mpid;
+	int ret, rc = 0;
+
+	exit_test = 0;
+	start_test = 0;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	/* Reinitialize the event variable */
+	for (j = 0; j < PLATFORM_CORE_COUNT; j++)
+		tftf_init_event(&cpu_booted[j]);
+
+	/* Turn ON all other CPUs */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+		/* Skip the lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		ret = tftf_cpu_on(cpu_mpid,
+			(uintptr_t) secondary_cpu_on_race_test, 0);
+		if (ret != PSCI_E_SUCCESS)
+			return TEST_RESULT_FAIL;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_booted[core_pos]);
+	}
+
+	for (j = 0; j < STRESS_TEST_COUNT; j++) {
+		/* Choose a target CPU */
+		for_each_cpu(target_node) {
+			cpu_mpid = tftf_get_mpidr_from_node(target_node);
+
+			/* Skip the lead CPU */
+			if (cpu_mpid == lead_mpid)
+				continue;
+
+			target_mpid = cpu_mpid;
+			/*
+			 * Ensure target_mpid update is visible prior to
+			 * starting test.
+			 */
+			dmbsy();
+
+			VERBOSE("Target MPID = %llx\n", target_mpid);
+			/*
+			 * Release the secondaries. Only has an effect on the
+			 * first pass; the flag is never cleared afterwards.
+			 */
+			start_test = 1;
+
+			/* Wait for the target CPU to turn OFF */
+			while (tftf_psci_affinity_info(target_mpid,
+					MPIDR_AFFLVL0) != PSCI_STATE_OFF)
+				;
+			/* The lead CPU joins the CPU ON race as well */
+			rc = test_cpu_on_race();
+			if (rc)
+				break;
+		}
+		if (rc)
+			break;
+	}
+	exit_test = 1;
+	/* Let every remaining CPU drain out of the test */
+	wait_for_non_lead_cpus();
+	return rc ? TEST_RESULT_FAIL : TEST_RESULT_SUCCESS;
+}
+
+/*
+ * The Test function to stress test CPU ON/OFF PSCI APIs executed by all CPUs.
+ * This function maintains a global counter which is incremented atomically
+ * when a CPU enters this function via warm boot (in case of secondary) or
+ * direct invocation (in case of lead CPU). It sends CPU ON to all OFF CPUs
+ * till another CPU has entered this function or `exit_flag` is set.
+ * If `include_cpu_suspend` is set, then it suspends the current CPU
+ * before iterating in the loop to send CPU ON requests.
+ */
+static test_result_t launch_cpu_on_off_stress(void)
+{
+	unsigned long long mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int cpu_node, temp_count;
+	int ret;
+
+	/* Record this CPU's entry into the test (warm boot or direct call) */
+	spin_lock(&counter_lock);
+	/* Store the count in a temporary variable */
+	temp_count = ++cpu_on_count;
+	spin_unlock(&counter_lock);
+
+	/* Another CPU may already have declared the test over */
+	if (exit_test)
+		return TEST_RESULT_SUCCESS;
+
+	while (!exit_test) {
+		/* Try to turn on every CPU that is currently OFF */
+		for_each_cpu(cpu_node) {
+			mpid = tftf_get_mpidr_from_node(cpu_node);
+
+			if (tftf_is_cpu_online(mpid))
+				continue;
+
+			/*
+			 * Racy by design: several CPUs may target the same
+			 * core, so ALREADY_ON / ON_PENDING are acceptable.
+			 */
+			ret = tftf_try_cpu_on(mpid,
+				(uintptr_t) launch_cpu_on_off_stress, 0);
+			if (ret != PSCI_E_SUCCESS && ret !=
+				PSCI_E_ON_PENDING && ret != PSCI_E_ALREADY_ON) {
+				tftf_testcase_printf("Unexpected return value"
+						" 0x%x from PSCI CPU ON\n", ret);
+				return TEST_RESULT_FAIL;
+			}
+		}
+
+		/* Break if another CPU has entered this test function */
+		if (temp_count != cpu_on_count)
+			break;
+
+		/* Check whether to suspend before iterating */
+		if (include_cpu_suspend) {
+			ret = tftf_program_timer_and_suspend(
+				PLAT_SUSPEND_ENTRY_TIME, power_state, NULL, NULL);
+
+			tftf_cancel_timer();
+
+			if (ret != PSCI_E_SUCCESS) {
+				tftf_testcase_printf("CPU timer/suspend"
+						" returned error 0x%x\n", ret);
+				return TEST_RESULT_FAIL;
+			}
+		}
+	}
+
+	/* The CPU that reaches the threshold count terminates the test */
+	spin_lock(&counter_lock);
+	if (cpu_on_count >= (STRESS_TEST_COUNT * PLATFORM_CORE_COUNT)) {
+		cpu_on_count = 0;
+		spin_unlock(&counter_lock);
+		exit_test = 1;
+		/* Wait for all cores to power OFF */
+		wait_for_non_lead_cpus();
+
+		/*
+		 * In case any other CPUs were turned ON in the meantime, wait
+		 * for them as well.
+		 */
+		wait_for_non_lead_cpus();
+	} else
+		spin_unlock(&counter_lock);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Stress test CPU ON / OFF APIs.
+ * Test Do :
+ * 1. Maintain a global counter will be atomically updated by each CPU
+ * when it turns ON.
+ * 2. Iterate over all the CPU in the topology and invoke CPU ON on CPU
+ * which are OFF.
+ * 3. Check if the global counter has updated. If not, goto step 2
+ * 4. All the cores which are turned ON executes Step 1 - 3.
+ * Note: The test will be skipped on single-core platforms.
+ */
+test_result_t psci_cpu_on_off_stress(void)
+{
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	/* Reset the shared test state before starting */
+	init_spinlock(&counter_lock);
+	cpu_on_count = 0;
+	exit_test = 0;
+
+	/* Pure ON/OFF stress: no suspend calls in the loop */
+	include_cpu_suspend = 0;
+
+	return launch_cpu_on_off_stress();
+}
+
+/*
+ * @Test_Aim@ Stress test CPU ON / OFF APIs with SUSPEND in between.
+ * Test Do :
+ * 1. Maintain a global counter which will be atomically updated by each CPU
+ * when it turns ON.
+ * 2. Iterate over all the CPUs in the topology and invoke CPU ON on CPUs
+ * which are OFF.
+ * 3. Check if the global counter has updated. If not, program wakeup
+ * timer and suspend. On wake-up goto step 2.
+ * 4. All the cores which are turned ON executes Step 1 - 3.
+ * Note: The test will be skipped on single-core platforms.
+ */
+test_result_t psci_cpu_on_off_suspend_stress(void)
+{
+	unsigned int stateid;
+	int rc;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	/* Reset the shared test state before starting */
+	init_spinlock(&counter_lock);
+	cpu_on_count = 0;
+	exit_test = 0;
+
+	/* Build a powerdown composite state for the highest power level */
+	rc = tftf_psci_make_composite_state_id(PLAT_MAX_PWR_LEVEL,
+			PSTATE_TYPE_POWERDOWN, &stateid);
+	if (rc != PSCI_E_SUCCESS) {
+		tftf_testcase_printf("Failed to construct composite state\n");
+		return TEST_RESULT_SKIPPED;
+	}
+	power_state = tftf_make_psci_pstate(PLAT_MAX_PWR_LEVEL,
+			PSTATE_TYPE_POWERDOWN, stateid);
+
+	/* Mix suspend calls into the ON/OFF loop */
+	include_cpu_suspend = 1;
+
+	return launch_cpu_on_off_stress();
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_system_suspend_stress.c b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_system_suspend_stress.c
new file mode 100644
index 0000000..5b500be
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_system_suspend_stress.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <events.h>
+#include <gic_v2.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#define MAX_TEST_ITERATIONS (100 * PLATFORM_CORE_COUNT)
+
+/* Number of iterations of the test completed so far (baton CPU only) */
+static int iteration_count;
+
+/* The CPU assigned the baton to drive the test */
+static u_register_t baton_cpu;
+
+/* Synchronization event which will be waited on by all the non-baton CPUs */
+static event_t sync_event;
+
+/* Global variables to synchronize participating CPUs on wake-up */
+static spinlock_t cpu_count_lock;	/* protects cpu_count updates */
+static volatile int cpu_count;		/* CPUs currently inside the test */
+static volatile int participating_cpu_count;
+
+/* Variable to store the system suspend power state and its statistics */
+static int system_susp_pwr_state;
+static u_register_t susp_count;		/* PSCI STAT baseline at test start */
+
+static test_result_t do_sys_susp_on_off_stress(void);
+
+/*
+ * Spin until every participating CPU has announced itself, i.e. has
+ * incremented `cpu_count` via inc_cpu_count().
+ */
+static void wait_for_cpus_to_enter_test(void)
+{
+	assert(participating_cpu_count <= PLATFORM_CORE_COUNT);
+
+	/* Busy-wait; `cpu_count` is volatile, so it is re-read each pass */
+	while (cpu_count != participating_cpu_count)
+		continue;
+}
+
+/* Atomically bump the count of CPUs currently inside the test */
+static void inc_cpu_count(void)
+{
+	spin_lock(&cpu_count_lock);
+	++cpu_count;
+	spin_unlock(&cpu_count_lock);
+
+	/* Can never exceed the number of CPUs on the platform */
+	assert(cpu_count <= PLATFORM_CORE_COUNT);
+}
+
+/* Atomically drop the count of CPUs currently inside the test */
+static void dec_cpu_count(void)
+{
+	spin_lock(&cpu_count_lock);
+	--cpu_count;
+	spin_unlock(&cpu_count_lock);
+
+	/* More decrements than increments would indicate a test bug */
+	assert(cpu_count >= 0);
+}
+
+/*
+ * Helper function to turn ON all the CPUs in the platform.
+ * Returns 0 on success, -1 if any CPU reports an unexpected PSCI error.
+ */
+static int try_cpu_on_all(void)
+{
+	int ret, cpu_node;
+	u_register_t cpu_mpid, current_cpu = read_mpidr_el1() & MPID_MASK;
+
+	/* Try to turn on all the non-lead CPUs */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+		/* Skip the calling CPU, it is already powered on */
+		if (cpu_mpid == current_cpu)
+			continue;
+
+		/*
+		 * Retry while the target reports a transient condition
+		 * (ON_PENDING / ALREADY_ON, e.g. it has not finished a
+		 * previous power transition yet). Any other non-success
+		 * code is fatal.
+		 */
+		do {
+			ret = tftf_try_cpu_on(cpu_mpid,
+				(uintptr_t) do_sys_susp_on_off_stress, 0);
+			if (ret != PSCI_E_SUCCESS && ret != PSCI_E_ON_PENDING &&
+					ret != PSCI_E_ALREADY_ON) {
+				ERROR("Unexpected return value 0x%x"
+					" from PSCI CPU ON\n", ret);
+				return -1;
+			}
+		} while (ret != PSCI_E_SUCCESS);
+	}
+	return 0;
+}
+
+/* Return the number of CPUs, other than the caller, that are OFF */
+static int get_off_cpu_count(void)
+{
+	u_register_t cpu_mpid, current_cpu = read_mpidr_el1() & MPID_MASK;
+	int cpu_node;
+	int aff_off_cpus = 0;
+
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+		/* The calling CPU is clearly not OFF; don't query it */
+		if (cpu_mpid == current_cpu)
+			continue;
+
+		if (tftf_psci_affinity_info(cpu_mpid, MPIDR_AFFLVL0) ==
+				PSCI_STATE_OFF)
+			aff_off_cpus++;
+	}
+
+	return aff_off_cpus;
+}
+
+/*
+ * The main test function which will be executed by all CPUs.
+ * 1. The CPU holding the baton will first enter this function and then turns
+ *    ON all other CPUs.
+ * 2. All the `non-baton` CPUs then wait for the `sync_event` to be signaled
+ *    to turn themselves OFF.
+ * 3. Number of CPUs which are signaled via `sync_event` by baton CPU is
+ *    random.
+ * 4. After signaled CPUs have turned themselves OFF, SYSTEM SUSPEND is
+ *    issued by the baton CPU.
+ * 5. The return value of SYSTEM_SUSPEND is checked by the baton CPU.
+ * 6. The next baton CPU is chosen randomly and the test is handed over to
+ *    this CPU.
+ */
+static test_result_t do_sys_susp_on_off_stress(void)
+{
+	int psci_ret, off_cpu_count;
+	u_register_t current_cpu;
+
+	inc_cpu_count();
+
+	current_cpu = read_mpidr_el1() & MPID_MASK;
+	if (current_cpu != baton_cpu) {
+		/* Non-baton CPUs just wait to be told to power down */
+		tftf_wait_for_event(&sync_event);
+		dec_cpu_count();
+		return TEST_RESULT_SUCCESS;
+	}
+
+	INFO("System suspend test: Baton holder CPU = 0x%llx\n",
+			(unsigned long long) current_cpu);
+	if (try_cpu_on_all() == -1) {
+		tftf_testcase_printf("CPU_ON of secondary CPUs failed.\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	wait_for_cpus_to_enter_test();
+
+	/*
+	 * Choose how many CPUs to turn OFF: 1 out of 3 times turn off all
+	 * CPUs but the baton CPU (SYSTEM SUSPEND must then succeed);
+	 * otherwise a random subset (it must then be denied).
+	 */
+	if (rand() % 3)
+		off_cpu_count = rand() % participating_cpu_count;
+	else
+		off_cpu_count = participating_cpu_count - 1;
+
+	/* Signal random number of CPUs to turn OFF */
+	tftf_send_event_to(&sync_event, off_cpu_count);
+
+	/* Wait for `off_cpu_count` CPUs to turn OFF */
+	while (get_off_cpu_count() != off_cpu_count)
+		;
+
+	/* Program timer to fire after delay, so the system wakes up again */
+	tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME);
+
+	/* Issue SYSTEM SUSPEND */
+	psci_ret = tftf_system_suspend();
+	tftf_cancel_timer();
+
+	/*
+	 * SYSTEM SUSPEND must succeed iff every CPU other than the caller
+	 * was OFF; in all other cases PSCI must deny the request.
+	 */
+	if (off_cpu_count == (participating_cpu_count - 1)) {
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("SYSTEM SUSPEND did not succeed "
+					"where expected\n");
+			return TEST_RESULT_FAIL;
+		}
+	} else {
+		if (psci_ret != PSCI_E_DENIED) {
+			tftf_testcase_printf("SYSTEM SUSPEND did not fail "
+					"where expected\n");
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Pass the baton to another CPU */
+	baton_cpu = tftf_find_random_cpu_other_than(current_cpu);
+
+	/* Unblock the waiting CPUs */
+	tftf_send_event_to(&sync_event,
+			(participating_cpu_count - 1) - off_cpu_count);
+
+	/* Wait for all CPUs other than current to turn OFF */
+	while (get_off_cpu_count() != (participating_cpu_count - 1))
+		;
+
+	dec_cpu_count();
+
+	if (iteration_count++ < MAX_TEST_ITERATIONS) {
+		/* Hand over the test execution to the new baton CPU */
+		psci_ret = tftf_cpu_on(baton_cpu,
+				(uintptr_t) do_sys_susp_on_off_stress, 0);
+		if (psci_ret != PSCI_E_SUCCESS)
+			return TEST_RESULT_FAIL;
+
+		/* Wait for new baton CPU to enter test */
+		while (cpu_count == 0)
+			;
+	} else {
+		/*
+		 * The test has completed. Print statistics if PSCI STAT COUNT
+		 * is supported. Use %llu to match the unsigned cast.
+		 */
+		if (is_psci_stat_count_supported()) {
+			u_register_t count = tftf_psci_stat_count(baton_cpu,
+					system_susp_pwr_state);
+			tftf_testcase_printf("Iterated %d with %llu system"
+					" suspends\n", MAX_TEST_ITERATIONS,
+					(unsigned long long)(count - susp_count));
+		}
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Stress test PSCI SYSTEM SUSPEND API.
+ * This test iteratively issues PSCI SYSTEM SUSPEND on random cores after
+ * turning OFF a random number of CPUs. The PSCI SYSTEM SUSPEND will only
+ * succeed when all CPUs except the calling CPU are OFF.
+ */
+test_result_t psci_sys_susp_on_off_stress_test(void)
+{
+	unsigned int pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+	unsigned int pwrlvl, susp_type, state_id;
+	int ret;
+
+	if (!is_psci_sys_susp_supported()) {
+		tftf_testcase_printf("System suspend is not supported "
+				"by the EL3 firmware\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	INIT_PWR_LEVEL_INDEX(pstateid_idx);
+	tftf_init_event(&sync_event);
+	init_spinlock(&cpu_count_lock);
+
+	/* Initialize participating CPU count */
+	participating_cpu_count = tftf_get_total_cpus_count();
+	cpu_count = 0;
+
+	iteration_count = 0;
+
+	/*
+	 * Assign a baton to the current CPU and it is in charge of driving
+	 * the test.
+	 */
+	baton_cpu = read_mpidr_el1() & MPID_MASK;
+
+	/* Record baseline SYSTEM SUSPEND stats if PSCI STAT is supported */
+	if (is_psci_stat_count_supported()) {
+		NOTICE("PSCI STAT COUNT supported\n");
+		tftf_set_deepest_pstate_idx(PLAT_MAX_PWR_LEVEL, pstateid_idx);
+
+		/* Check if the power state is valid */
+		ret = tftf_get_pstate_vars(&pwrlvl,
+				&susp_type,
+				&state_id,
+				pstateid_idx);
+		if (ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("tftf_get_pstate_vars() failed"
+					" with ret = %x\n", ret);
+			return TEST_RESULT_FAIL;
+		}
+
+		assert(pwrlvl == PLAT_MAX_PWR_LEVEL);
+
+		/* Composite power state used to query suspend statistics */
+		system_susp_pwr_state = tftf_make_psci_pstate(pwrlvl,
+				susp_type, state_id);
+
+		susp_count = tftf_psci_stat_count(baton_cpu, system_susp_pwr_state);
+	}
+
+	return do_sys_susp_on_off_stress();
+}
diff --git a/tftf/tests/runtime_services/standard_service/query_std_svc.c b/tftf/tests/runtime_services/standard_service/query_std_svc.c
new file mode 100644
index 0000000..e96d4ae
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/query_std_svc.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <psci.h>
+#include <smccc.h>
+#include <std_svc.h>
+#include <tftf_lib.h>
+#include <uuid_utils.h>
+
+/*
+ * Standard service UUID as returned by the implementation in the Trusted
+ * Firmware. The test compares the value returned by the SMC_STD_SVC_UID
+ * call against this constant.
+ */
+static const uuid_t armtf_std_svc_uuid = {
+	0x108d905b, 0xf863, 0x47e8, 0xae, 0x2d,
+	{ 0xc0, 0xfb, 0x56, 0x41, 0xf6, 0xe2 }
+};
+
+/**
+ * @Test_Aim@ Query the Standard Service
+ *
+ * This test targets the implementation of the Standard Service in the Trusted
+ * Firmware. If it is interfaced with a different implementation then this test
+ * will most likely fail because the values returned by the service won't be the
+ * ones expected.
+ *
+ * The following queries are performed:
+ * 1) Call UID
+ * 2) Call count
+ * 3) Call revision details
+ *
+ * Returns TEST_RESULT_SUCCESS only if all three queries produce the
+ * expected values.
+ */
+test_result_t test_query_std_svc(void)
+{
+	/* Zero-initialize so unused SMC argument registers are not garbage */
+	smc_args std_svc_args = { 0 };
+	smc_ret_values ret;
+	uuid_t std_svc_uuid;
+	char uuid_str[UUID_STR_SIZE];
+	test_result_t test_result = TEST_RESULT_SUCCESS;
+
+	/* Standard Service Call UID */
+	std_svc_args.arg0 = SMC_STD_SVC_UID;
+	ret = tftf_smc(&std_svc_args);
+
+	/* The UID is returned as four 32-bit words in ret0..ret3 */
+	make_uuid_from_4words(&std_svc_uuid,
+			ret.ret0, ret.ret1, ret.ret2, ret.ret3);
+	if (!uuid_equal(&std_svc_uuid, &armtf_std_svc_uuid)) {
+		tftf_testcase_printf("Wrong UUID: expected %s,\n",
+				uuid_to_str(&armtf_std_svc_uuid, uuid_str));
+		tftf_testcase_printf(" got %s\n",
+				uuid_to_str(&std_svc_uuid, uuid_str));
+		test_result = TEST_RESULT_FAIL;
+	}
+
+	/* Standard Service Call Count */
+	std_svc_args.arg0 = SMC_STD_SVC_CALL_COUNT;
+	ret = tftf_smc(&std_svc_args);
+
+	if (ret.ret0 == SMC_UNKNOWN) {
+		tftf_testcase_printf("Querying STD service call count"
+				" failed\n");
+		test_result = TEST_RESULT_FAIL;
+	} else {
+		tftf_testcase_printf("STD Service Call Count reported by firmware:"
+				" %llu\n", (unsigned long long)ret.ret0);
+	}
+
+	/* Standard Service Call Revision details */
+	std_svc_args.arg0 = SMC_STD_SVC_REVISION;
+	ret = tftf_smc(&std_svc_args);
+
+	if ((ret.ret0 != STD_SVC_REVISION_MAJOR) ||
+	    (ret.ret1 != STD_SVC_REVISION_MINOR)) {
+		tftf_testcase_printf(
+			"Wrong Revision: expected {%u.%u}, got {%llu.%llu}\n",
+			STD_SVC_REVISION_MAJOR, STD_SVC_REVISION_MINOR,
+			(unsigned long long)ret.ret0,
+			(unsigned long long)ret.ret1);
+		test_result = TEST_RESULT_FAIL;
+	}
+
+	return test_result;
+}
diff --git a/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S b/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S
new file mode 100644
index 0000000..aae85fc
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <sdei.h>
+
+ .globl sdei_state_entrypoint
+ .globl sdei_entrypoint
+ .globl sdei_entrypoint_resume
+ .globl sdei_handler_done
+
+ .local event_handled
+ .comm event_handled, PLATFORM_CORE_COUNT * 4, 8
+
+#ifdef AARCH64
+/*
+ * SDEI handler entry: run the C handler, then complete the event
+ * without resuming the interrupted context.
+ */
+func sdei_entrypoint
+	/* Save lr; pairing with xzr keeps sp 16-byte aligned */
+	stp	xzr, x30, [sp, #-16]!
+	bl	sdei_event_handler
+	ldp	xzr, x30, [sp],#16
+	/* Tell the dispatcher the event is handled */
+	mov_imm	x0, SDEI_EVENT_COMPLETE
+	mov	x1, xzr
+	smc	#0
+	/* The COMPLETE call does not return here */
+	b	.
+endfunc sdei_entrypoint
+
+/*
+ * SDEI handler entry: run the C handler, flag completion for this core,
+ * then complete-and-resume back to the interrupted context.
+ */
+func sdei_entrypoint_resume
+	/* Preserve x2 and lr; keeps sp 16-byte aligned */
+	stp	x2, x30, [sp, #-16]!
+
+	/* Dispatch to C handler */
+	bl	sdei_event_handler
+
+	/* Calculate address of event completion variable */
+	mrs	x0, mpidr_el1
+	and	x0, x0, #MPID_MASK
+	bl	platform_get_core_pos
+	/* Each core owns a 4-byte slot in the event_handled array */
+	lsl	x0, x0, #2
+	adrp	x1, event_handled
+	add	x1, x1, :lo12:event_handled
+	add	x1, x0, x1
+
+	/* Mark event handling as complete so `sdei_handler_done` can return */
+	mov	w2, #1
+	str	w2, [x1]
+	sev
+
+	/* Populate `x0` and `x1` to prepare for SMC call */
+	ldp	x1, x30, [sp], #16
+	mov_imm	x0, SDEI_EVENT_COMPLETE_AND_RESUME
+	smc	#0
+endfunc sdei_entrypoint_resume
+
+/*
+ * Block until the SDEI handler on this core flags completion in its
+ * event_handled slot, then clear the slot for the next run.
+ */
+func sdei_handler_done
+	stp	x29, x30, [sp, #-16]!
+	mov	x29, sp
+
+	/* Calculate address of event completion variable */
+	mrs	x0, mpidr_el1
+	and	x0, x0, #MPID_MASK
+	/* Stash lr in x29 across the call (x29 is already saved on stack) */
+	mov	x29, x30
+	bl	platform_get_core_pos
+	mov	x30, x29
+	/* Each core owns a 4-byte slot in the event_handled array */
+	lsl	x0, x0, #2
+	adrp	x1, event_handled
+	add	x1, x1, :lo12:event_handled
+	add	x0, x0, x1
+
+again:
+	/*
+	 * Wait until the timer interrupt fires, which will be handled
+	 * as an SDEI event and take us to sdei_entrypoint_resume().
+	 */
+	wfe
+	ldr	w1, [x0]
+	cmp	w1, #1
+	bne	again
+
+	/* Reset event completion variable for next run */
+	mov	w1, #0
+	str	w1, [x0]
+
+	ldp	x29, x30, [sp], #16
+	ret
+endfunc sdei_handler_done
+
+/*
+ * Entry point for the handler-state tests. The registered argument
+ * (arriving in x1) is a C function pointer; call it in handler context,
+ * flag completion, then complete the event as handled.
+ */
+func sdei_state_entrypoint
+	stp	x29, x30, [sp, #-16]!
+	mov	x29, sp
+	/* x1 = registered argument: the C routine to run */
+	blr	x1
+
+	/* Calculate address of event completion variable */
+	mrs	x0, mpidr_el1
+	and	x0, x0, #MPID_MASK
+	bl	platform_get_core_pos
+	/* Each core owns a 4-byte slot in the event_handled array */
+	lsl	x0, x0, #2
+	adrp	x1, event_handled
+	add	x1, x1, :lo12:event_handled
+	add	x1, x0, x1
+
+	/* Mark event handling as complete so `sdei_handler_done` can return */
+	mov	w2, #1
+	str	w2, [x1]
+	sev
+
+	ldp	x29, x30, [sp],#16
+	mov_imm	x0, SDEI_EVENT_COMPLETE
+	mov_imm	x1, SDEI_EV_HANDLED
+	smc	#0
+	/* The COMPLETE call does not return here */
+	b	.
+endfunc sdei_state_entrypoint
+
+#else /* AARCH32 */
+
+/*
+ * SDEI is not exercised on AArch32 in this test suite; provide
+ * trap-forever stubs so the image still links.
+ */
+func sdei_entrypoint
+	/* SDEI is not supported on AArch32. */
+	b	.
+endfunc sdei_entrypoint
+
+func sdei_entrypoint_resume
+	/* SDEI is not supported on AArch32. */
+	b	.
+endfunc sdei_entrypoint_resume
+
+func sdei_handler_done
+	/* SDEI is not supported on AArch32. */
+	b	.
+endfunc sdei_handler_done
+
+func sdei_state_entrypoint
+	/* SDEI is not supported on AArch32. */
+	b	.
+endfunc sdei_state_entrypoint
+#endif
diff --git a/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei.c b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei.c
new file mode 100644
index 0000000..dc357c1
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <private_timer.h>
+#include <sdei.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#define EV_COOKIE 0xDEADBEEF
+#define TIMER_TIMEO_MS 10
+
+/* Assembly entry points the SDEI dispatcher jumps to (sdei_entrypoint.S) */
+extern sdei_handler_t sdei_entrypoint;
+extern sdei_handler_t sdei_entrypoint_resume;
+
+/*
+ * the bound event number as returned from sdei_interrupt_bind(), passed
+ * to the per-cpu SDEI test function
+ */
+static int bound_ev;
+/* true if the test is using a private interrupt source, false otherwise. */
+static int private_interrupt;
+
+/* Synchronization state for CPUs participating in a test */
+static spinlock_t cpu_count_lock;	/* protects cpu_count updates */
+static volatile int cpu_count;		/* CPUs currently inside the test */
+static volatile int participating_cpu_count;
+
+/* Announce this CPU, then spin until all participants have arrived */
+static void wait_for_participating_cpus(void)
+{
+	assert(participating_cpu_count <= PLATFORM_CORE_COUNT);
+
+	spin_lock(&cpu_count_lock);
+	++cpu_count;
+	spin_unlock(&cpu_count_lock);
+
+	assert(cpu_count <= PLATFORM_CORE_COUNT);
+
+	/* `cpu_count` is volatile, so it is re-read on every iteration */
+	while (cpu_count != participating_cpu_count)
+		;
+}
+
+/* Arm the timer that raises the interrupt bound to the SDEI event */
+void sdei_trigger_event(void)
+{
+	printf("%s: triggering SDEI event\n", __func__);
+
+	if (private_interrupt != 0)
+		private_timer_start(TIMER_TIMEO_MS);
+	else
+		tftf_program_timer(TIMER_TIMEO_MS);
+}
+
+/*
+ * Per-CPU test body: register the bound event for this PE, enable it,
+ * unmask the PE, trigger the event and wait for its handler to complete.
+ * Tears down in reverse order via the goto chain. Returns
+ * TEST_RESULT_FAIL if any SDEI call failed.
+ */
+static test_result_t sdei_event(void)
+{
+	long long ret;
+
+	wait_for_participating_cpus();
+
+	printf("%s: mpidr = 0x%llx\n", __func__,
+		(unsigned long long)read_mpidr_el1());
+
+	/* Route the event to this PE; EV_COOKIE is checked in the handler */
+	ret = sdei_event_register(bound_ev, sdei_entrypoint_resume, EV_COOKIE,
+		SDEI_REGF_RM_PE, read_mpidr_el1());
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+			ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_enable(bound_ev);
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+		goto err0;
+	}
+
+	ret = sdei_pe_unmask();
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+		goto err1;
+	}
+
+	sdei_trigger_event();
+
+	/* Spins until the assembly handler marks the event as handled */
+	sdei_handler_done();
+
+	sdei_pe_mask();
+
+	/* Unwind in reverse order of setup */
+err1:
+	sdei_event_disable(bound_ev);
+err0:
+	sdei_event_unregister(bound_ev);
+
+	/* `ret` still holds the first failing SDEI return code, if any */
+	if (ret < 0)
+		return TEST_RESULT_FAIL;
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/* C-level SDEI handler: verify the cookie and stop the trigger source */
+int sdei_event_handler(int ev, unsigned long long arg)
+{
+	printf("%s: handler fired\n", __func__);
+	assert(arg == EV_COOKIE);
+
+	if (private_interrupt != 0)
+		private_timer_stop();
+	else
+		tftf_cancel_timer();
+
+	return 0;
+}
+
+/*
+ * Handle an SDEI event on all cores in sequence: each secondary runs
+ * sdei_event() to completion and powers off before the next is started;
+ * the lead CPU runs it last.
+ */
+test_result_t test_sdei_event_serial(void)
+{
+	struct sdei_intr_ctx intr_ctx;
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node;
+	long long ret;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+	/* Cores run one at a time, so only one CPU is in the test at once */
+	participating_cpu_count = 1;
+	init_spinlock(&cpu_count_lock);
+	cpu_count = 0;
+
+	ret = sdei_version();
+	if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+		tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	disable_irq();
+	bound_ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+	if (bound_ev < 0) {
+		tftf_testcase_printf("SDEI interrupt bind failed: %x\n",
+			bound_ev);
+		/* Don't leave the PE with interrupts masked on failure */
+		enable_irq();
+		return TEST_RESULT_FAIL;
+	}
+
+	/* use a shared interrupt source for this test-case */
+	private_interrupt = 0;
+
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+			(uintptr_t)sdei_event, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+				(unsigned long long)target_mpid);
+			goto err0;
+		}
+		/* Wait for the CPU to finish the test body and power down */
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+			PSCI_STATE_OFF)
+			continue;
+		cpu_count--;
+	}
+
+	assert(cpu_count == 0);
+
+	/* Finally run the same test body on the lead CPU */
+	if (sdei_event() != TEST_RESULT_SUCCESS)
+		goto err0;
+
+	cpu_count--;
+	assert(cpu_count == 0);
+
+	sdei_interrupt_release(bound_ev, &intr_ctx);
+	enable_irq();
+
+	return TEST_RESULT_SUCCESS;
+
+err0:
+	sdei_private_reset();
+	sdei_shared_reset();
+	sdei_interrupt_release(bound_ev, &intr_ctx);
+	enable_irq();
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * Handle an SDEI event on all cores in parallel: all CPUs are brought up
+ * and run sdei_event() concurrently, each using its private CPU timer as
+ * the interrupt source.
+ */
+test_result_t test_sdei_event_parallel(void)
+{
+	struct sdei_intr_ctx intr_ctx;
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node;
+	long long ret;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+	/* Every CPU on the platform takes part at the same time */
+	participating_cpu_count = tftf_get_total_cpus_count();
+	init_spinlock(&cpu_count_lock);
+	cpu_count = 0;
+
+	ret = sdei_version();
+	if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+		tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	disable_irq();
+	bound_ev = sdei_interrupt_bind(IRQ_PCPU_HP_TIMER, &intr_ctx);
+	if (bound_ev < 0) {
+		tftf_testcase_printf("SDEI interrupt bind failed: %x\n",
+			bound_ev);
+		/* Don't leave the PE with interrupts masked on failure */
+		enable_irq();
+		return TEST_RESULT_FAIL;
+	}
+
+	/* use a private interrupt source for this test-case */
+	private_interrupt = 1;
+
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+			(uintptr_t)sdei_event, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+				(unsigned long long)target_mpid);
+			goto err0;
+		}
+	}
+
+	/* Run the same test body on the lead CPU, in parallel */
+	if (sdei_event() != TEST_RESULT_SUCCESS)
+		goto err0;
+
+	/* Wait for every secondary to finish and power off */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+			PSCI_STATE_OFF)
+			continue;
+		cpu_count--;
+	}
+
+	cpu_count--;
+	assert(cpu_count == 0);
+
+	sdei_interrupt_release(bound_ev, &intr_ctx);
+	enable_irq();
+
+	return TEST_RESULT_SUCCESS;
+err0:
+	sdei_private_reset();
+	sdei_shared_reset();
+	sdei_interrupt_release(bound_ev, &intr_ctx);
+	enable_irq();
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * Register and enable event 0 on this PE, signal the PE itself via
+ * SDEI_EVENT_SIGNAL, wait for the handler to complete, then tear down.
+ */
+static test_result_t sdei_event_signal_self(void)
+{
+	long long ret;
+
+	/* Event 0 is the software-signalled event used by sdei_event_signal */
+	ret = sdei_event_register(0, sdei_entrypoint_resume, EV_COOKIE,
+		SDEI_REGF_RM_PE, read_mpidr_el1());
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+			ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_enable(0);
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+		goto err0;
+	}
+
+	ret = sdei_pe_unmask();
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+		goto err1;
+	}
+
+	/* Target the signal at this very PE */
+	ret = sdei_event_signal(read_mpidr_el1());
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI event signal failed: 0x%llx\n", ret);
+		goto err2;
+	}
+
+	/* Spins until the assembly handler marks the event as handled */
+	sdei_handler_done();
+
+	/* Unwind in reverse order of setup */
+err2:
+	sdei_pe_mask();
+err1:
+	sdei_event_disable(0);
+err0:
+	sdei_event_unregister(0);
+
+	/* `ret` still holds the first failing SDEI return code, if any */
+	if (ret < 0)
+		return TEST_RESULT_FAIL;
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Each core signals itself using SDEI event signalling. Secondaries are
+ * brought up one at a time; each runs sdei_event_signal_self() and powers
+ * off before the next starts. The lead CPU signals itself last.
+ */
+test_result_t test_sdei_event_signal_serial(void)
+{
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node;
+	long long ret;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	ret = sdei_version();
+	if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+		tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	disable_irq();
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+			(uintptr_t)sdei_event_signal_self, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+				(unsigned long long)target_mpid);
+			ret = -1;
+			goto err0;
+		}
+		/* Wait for the CPU to finish the test body and power down */
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+			PSCI_STATE_OFF)
+			continue;
+	}
+
+	/* Finally, the lead CPU signals itself */
+	if (sdei_event_signal_self() != TEST_RESULT_SUCCESS) {
+		ret = -1;
+		goto err0;
+	}
+
+err0:
+	enable_irq();
+	if (ret < 0)
+		return TEST_RESULT_FAIL;
+	return TEST_RESULT_SUCCESS;
+}
+
+/* Per-CPU flag set once that CPU is ready to receive an event signal */
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+
+/*
+ * Secondary-CPU body for the signal-all test: register and enable event 0
+ * on this PE, announce readiness via cpu_ready[], then wait for the lead
+ * CPU's signal to be handled.
+ */
+static test_result_t sdei_wait_for_event_signal(void)
+{
+	int core_pos;
+	long long ret;
+
+	/* Event 0 is the software-signalled event used by sdei_event_signal */
+	ret = sdei_event_register(0, sdei_entrypoint_resume, EV_COOKIE,
+		SDEI_REGF_RM_PE, read_mpidr_el1());
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+			ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_enable(0);
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+		goto err0;
+	}
+
+	ret = sdei_pe_unmask();
+	if (ret < 0) {
+		tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+		goto err1;
+	}
+
+	/* Tell the lead CPU this PE is ready to be signalled */
+	core_pos = platform_get_core_pos(read_mpidr_el1());
+	tftf_send_event(&cpu_ready[core_pos]);
+
+	/* Spins until the assembly handler marks the event as handled */
+	sdei_handler_done();
+
+	sdei_pe_mask();
+err1:
+	sdei_event_disable(0);
+err0:
+	sdei_event_unregister(0);
+
+	/* `ret` still holds the first failing SDEI return code, if any */
+	if (ret < 0)
+		return TEST_RESULT_FAIL;
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * The primary core signals every other core using SDEI event signalling.
+ * Each secondary registers for event 0, reports readiness, and waits;
+ * the lead CPU then signals each one in turn.
+ */
+test_result_t test_sdei_event_signal_all(void)
+{
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node, core_pos;
+	int i;
+	long long ret;
+
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+		tftf_init_event(&cpu_ready[i]);
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	ret = sdei_version();
+	if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+		tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	disable_irq();
+	/* Bring up every secondary and wait for it to report readiness */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+			(uintptr_t)sdei_wait_for_event_signal, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+				(unsigned long long)target_mpid);
+			ret = -1;
+			goto err0;
+		}
+		core_pos = platform_get_core_pos(target_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+	}
+
+	/* Now signal each secondary in turn */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = sdei_event_signal(target_mpid);
+		if (ret < 0) {
+			tftf_testcase_printf("SDEI event signal failed: 0x%llx\n",
+				ret);
+			ret = -1;
+			goto err0;
+		}
+	}
+
+err0:
+	enable_irq();
+	if (ret < 0)
+		return TEST_RESULT_FAIL;
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_state.c b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_state.c
new file mode 100644
index 0000000..d4ca124
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_state.c
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sdei.h>
+#include <stdint.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+/*
+ * These functions test the SDEI handler state transition as listed in section
+ * in 6.1.2 of the specification.
+ */
+
+/* Aliases for SDEI handler states: 'R'unning, 'E'nabled, and re'G'istered */
+#define r_ 0
+#define R_ (1u << 2)
+
+#define e_ 0
+#define E_ (1u << 1)
+
+#define g_ 0
+#define G_ (1u << 0)
+
+/* All possible composite handler states (uppercase letter = bit set) */
+#define reg_ (r_ | e_ | g_)
+#define reG_ (r_ | e_ | G_)
+#define rEg_ (r_ | E_ | g_)
+#define rEG_ (r_ | E_ | G_)
+#define Reg_ (R_ | e_ | g_)
+#define ReG_ (R_ | e_ | G_)
+#define REg_ (R_ | E_ | g_)
+#define REG_ (R_ | E_ | G_)
+
+#define is_running(st) ((st & R_) != 0)
+#define is_enabled(st) ((st & E_) != 0)
+#define is_registered(st) ((st & G_) != 0)
+
+/* Assembly trampoline that calls the function passed as the event arg */
+extern sdei_handler_t sdei_state_entrypoint;
+
+/* Set to 1 by the in-handler check routine only if all checks passed */
+static int handler_success;
+/* Event number returned by sdei_interrupt_bind() */
+static int32_t ev;
+
+/*
+ * Dummy handler that won't actually be called: the tests that register it
+ * never trigger the event. Reaching it indicates a test failure, hence
+ * the panic().
+ */
+static int sdei_unreachable_handler(int ev, uint64_t arg)
+{
+	panic();
+}
+
+/*
+ * Test all failure transitions when the handler state is "unregistered":
+ * every handler-state operation must return DENIED while the event has a
+ * bound interrupt but no registered handler.
+ */
+static test_result_t hdlr_unregistered(void)
+{
+	int64_t status, ret;
+	struct sdei_intr_ctx intr_ctx;
+
+	ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+	if (ev < 0) {
+		printf("%d: SDEI interrupt bind failed; ret=%d\n",
+			__LINE__, ev);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Query the state of handler. Expect it to be unregistered */
+	status = sdei_event_status(ev);
+	if (status != reg_) {
+		printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+			__LINE__, (unsigned long long) status, reg_);
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * NOTE(review): the failure paths below return without releasing
+	 * the bound interrupt, which may affect subsequent tests — confirm
+	 * whether that is acceptable for this suite.
+	 */
+	ret = sdei_event_enable(ev);
+	if (ret != -SMC_EDENY) {
+		printf("%d: ENABLE returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_disable(ev);
+	if (ret != -SMC_EDENY) {
+		printf("%d: DISABLE returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_unregister(ev);
+	if (ret != -SMC_EDENY) {
+		printf("%d: UNREGISTER returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Make sure we're operating on a shared interrupt */
+	assert(tftf_get_timer_irq() >= 32);
+	ret = sdei_event_routing_set(ev, SDEI_REGF_RM_PE);
+	if (ret != -SMC_EDENY) {
+		printf("%d: ROUTING_SET returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_context(0);
+	if (ret != -SMC_EDENY) {
+		printf("%d: EVENT_CONTEXT returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_complete(SDEI_EV_HANDLED);
+	if (ret != -SMC_EDENY) {
+		printf("%d: COMPLETE returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_complete_and_resume(0);
+	if (ret != -SMC_EDENY) {
+		printf("%d: COMPLETE_AND_RESUME returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_interrupt_release(ev, &intr_ctx);
+	if (ret < 0) {
+		printf("%d: SDEI interrupt release failed; ret=%lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test failure transitions when the handler state is "registered" (but
+ * not enabled or running): re-register, release, context, complete and
+ * complete-and-resume must all be denied.
+ */
+static test_result_t hdlr_registered(void)
+{
+	int64_t status, ret;
+	struct sdei_intr_ctx intr_ctx;
+
+	ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+	if (ev < 0) {
+		printf("%d: SDEI interrupt bind failed; ret=%d\n",
+			__LINE__, ev);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Register with dummy values. We aren't going to trigger the event */
+	ret = sdei_event_register(ev, sdei_unreachable_handler, 0,
+		SDEI_REGF_RM_PE, read_mpidr_el1());
+	if (ret < 0) {
+		printf("%d: SDEI interrupt register failed; ret=%lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Query the state of handler. Expect it to be registered */
+	status = sdei_event_status(ev);
+	if (status != reG_) {
+		printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+			__LINE__, (unsigned long long) status, reG_);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_register(ev, sdei_unreachable_handler, 0,
+		SDEI_REGF_RM_PE, read_mpidr_el1());
+	if (ret != -SMC_EDENY) {
+		printf("%d: REGISTER returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_interrupt_release(ev, &intr_ctx);
+	if (ret != -SMC_EDENY) {
+		printf("%d: RELEASE returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_context(0);
+	if (ret != -SMC_EDENY) {
+		printf("%d: EVENT_CONTEXT returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_complete(SDEI_EV_HANDLED);
+	if (ret != -SMC_EDENY) {
+		printf("%d: COMPLETE returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_complete_and_resume(0);
+	if (ret != -SMC_EDENY) {
+		printf("%d: COMPLETE_AND_RESUME returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_event_unregister(ev);
+	if (ret < 0) {
+		printf("%d: SDEI unregister failed; ret=%lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = sdei_interrupt_release(ev, &intr_ctx);
+	if (ret < 0) {
+		printf("%d: SDEI interrupt release failed; ret=%lld\n",
+			__LINE__, (long long) ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Runs in SDEI handler context (invoked by sdei_state_entrypoint through
+ * the registered argument). Verifies the transitions that must be denied
+ * while the event is running, and that disable is allowed. Sets
+ * `handler_success` to 1 only if every check passed.
+ */
+static void running_handler(int ev, unsigned long long arg)
+{
+	int64_t ret, status;
+	struct sdei_intr_ctx intr_ctx;
+
+	/* Cancel timer to prevent further triggers */
+	tftf_cancel_timer();
+
+	handler_success = 0;
+
+	/* Expect the event to be running, enabled and registered */
+	status = sdei_event_status(ev);
+	if (status != REG_) {
+		printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+			__LINE__, (unsigned long long) status, REG_);
+		return;
+	}
+
+	/* Disabling a running event is allowed; check status again */
+	ret = sdei_event_disable(ev);
+	if (ret < 0) {
+		printf("%d: DISABLE returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return;
+	}
+
+	status = sdei_event_status(ev);
+	if (status != ReG_) {
+		printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+			__LINE__, (unsigned long long) status, ReG_);
+		return;
+	}
+
+	ret = sdei_event_register(ev, sdei_unreachable_handler, 0,
+		SDEI_REGF_RM_PE, read_mpidr_el1());
+	if (ret != -SMC_EDENY) {
+		printf("%d: REGISTER returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return;
+	}
+
+	ret = sdei_interrupt_release(ev, &intr_ctx);
+	if (ret != -SMC_EDENY) {
+		printf("%d: RELEASE returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return;
+	}
+
+	ret = sdei_event_routing_set(ev, SDEI_REGF_RM_PE);
+	if (ret != -SMC_EDENY) {
+		printf("%d: ROUTING_SET returned unexpected error code %lld\n",
+			__LINE__, (long long) ret);
+		return;
+	}
+
+	handler_success = 1;
+}
+
+/*
+ * Drive an SDEI event into the handler-running state: bind the timer
+ * interrupt, register running_handler as the payload, enable and trigger the
+ * event, then tear everything down. The in-handler checks set
+ * handler_success, which determines the final result.
+ */
+test_result_t hdlr_registered_running(void)
+{
+ int64_t status, ret;
+ struct sdei_intr_ctx intr_ctx;
+
+ /* Bind the TFTF timer interrupt to a dynamic SDEI event */
+ ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+ if (ev < 0) {
+ printf("%d: SDEI interrupt bind failed; ret=%d\n",
+ __LINE__, ev);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_register(ev, sdei_state_entrypoint,
+ (uintptr_t) &running_handler, SDEI_REGF_RM_PE,
+ read_mpidr_el1());
+ if (ret < 0) {
+ printf("%d: SDEI interrupt register failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Query the state of handler. Expect it to be registered only (reG_) */
+ status = sdei_event_status(ev);
+ if (status != reG_) {
+ printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+ __LINE__, (unsigned long long) status, reG_);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(ev);
+ if (ret < 0) {
+ printf("%d: SDEI event enable failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Unmask this PE, trigger the event and synchronise with the handler */
+ sdei_pe_unmask();
+ sdei_trigger_event();
+ sdei_handler_done();
+
+ ret = sdei_event_unregister(ev);
+ if (ret < 0) {
+ printf("%d: SDEI unregister failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret < 0) {
+ printf("%d: SDEI interrupt release failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* The handler records its verdict in handler_success */
+ if (handler_success == 0)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * This is called from an SDEI handler (via sdei_state_entrypoint). It
+ * unregisters the event while its handler is still running, then verifies
+ * the behaviour of SDEI calls in the unregistered-but-running state,
+ * recording the outcome in handler_success.
+ */
+static void ureg_running_handler(int ev, unsigned long long arg)
+{
+ int64_t ret, status;
+ struct sdei_intr_ctx intr_ctx;
+
+ /* Cancel timer to prevent further triggers */
+ tftf_cancel_timer();
+
+ /* Assume failure until every check below has passed */
+ handler_success = 0;
+
+ /* Query the state of handler. Expect it to be running */
+ status = sdei_event_status(ev);
+ if (!is_running(status)) {
+ printf("%d: Handler reported as not running\n", __LINE__);
+ return;
+ }
+
+ /*
+ * Unregister the event right away. Unregister while running must return
+ * pending error code.
+ */
+ ret = sdei_event_unregister(ev);
+ if (ret != -SMC_EPEND) {
+ printf("%d: SDEI unregister failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ /*
+ * Query the state of handler. Expect it to be running-only now that
+ * we've unregistered.
+ */
+ status = sdei_event_status(ev);
+ if (status != Reg_) {
+ printf("%d: Handler not reported as running-only\n", __LINE__);
+ return;
+ }
+
+ /* Registering again while the handler still runs must be denied */
+ ret = sdei_event_register(ev, sdei_unreachable_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret != -SMC_EDENY) {
+ printf("%d: REGISTER returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ /* Releasing the bound interrupt must be denied while running */
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret != -SMC_EDENY) {
+ printf("%d: RELEASE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ /* Enable/disable are invalid for an unregistered event */
+ ret = sdei_event_enable(ev);
+ if (ret != -SMC_EDENY) {
+ printf("%d: ENABLE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ ret = sdei_event_disable(ev);
+ if (ret != -SMC_EDENY) {
+ printf("%d: DISABLE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ /* Unregister while running will return PEND */
+ ret = sdei_event_unregister(ev);
+ if (ret != -SMC_EPEND) {
+ printf("%d: UNREGISTER returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ ret = sdei_event_routing_set(ev, SDEI_REGF_RM_PE);
+ if (ret != -SMC_EDENY) {
+ printf("%d: ROUTING_SET returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ handler_success = 1;
+}
+
+/*
+ * Drive an SDEI event into the unregistered-but-running state: the payload
+ * handler (ureg_running_handler) unregisters the event from within itself.
+ * Afterwards verify that a second unregister is denied, then release the
+ * interrupt. The in-handler checks set handler_success.
+ */
+test_result_t hdlr_unregistered_running(void)
+{
+ int64_t status, ret;
+ struct sdei_intr_ctx intr_ctx;
+
+ ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+ if (ev < 0) {
+ printf("%d: SDEI interrupt bind failed; ret=%d\n",
+ __LINE__, ev);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_register(ev, sdei_state_entrypoint,
+ (uintptr_t) &ureg_running_handler, SDEI_REGF_RM_PE,
+ read_mpidr_el1());
+ if (ret < 0) {
+ printf("%d: SDEI interrupt register failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Query the state of handler. Expect it to be registered only (reG_) */
+ status = sdei_event_status(ev);
+ if (status != reG_) {
+ printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+ __LINE__, (unsigned long long) status, reG_);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(ev);
+ if (ret < 0) {
+ printf("%d: SDEI event enable failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Unmask this PE, trigger the event and synchronise with the handler */
+ sdei_pe_unmask();
+ sdei_trigger_event();
+ sdei_handler_done();
+
+ /*
+ * We've already unregistered the event within the handler, so this call
+ * must fail.
+ */
+ ret = sdei_event_unregister(ev);
+ if (ret != -SMC_EDENY) {
+ printf("%d: SDEI unregister failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret < 0) {
+ printf("%d: SDEI interrupt release failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* The handler records its verdict in handler_success */
+ if (handler_success == 0)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Exercise the handler-enabled (but not running) state: bind, register and
+ * enable an event without triggering it, then verify that calls which are
+ * invalid for a registered/enabled event are denied, before unregistering
+ * and releasing the interrupt.
+ */
+test_result_t hdlr_enabled(void)
+{
+ int64_t status, ret;
+ struct sdei_intr_ctx intr_ctx;
+
+ ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+ if (ev < 0) {
+ printf("%d: SDEI interrupt bind failed; ret=%d\n",
+ __LINE__, ev);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_register(ev, sdei_state_entrypoint,
+ (uintptr_t) &ureg_running_handler, SDEI_REGF_RM_PE,
+ read_mpidr_el1());
+ if (ret < 0) {
+ printf("%d: SDEI interrupt register failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(ev);
+ if (ret < 0) {
+ printf("%d: SDEI event enable failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Query the state of handler. Expect it to be both registered and
+ * enabled.
+ */
+ status = sdei_event_status(ev);
+ if (status != rEG_) {
+ /* Print the value actually compared against (rEG_) */
+ printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+ __LINE__, (unsigned long long) status, rEG_);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Re-registering an already-registered event must be denied */
+ ret = sdei_event_register(ev, sdei_unreachable_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret != -SMC_EDENY) {
+ printf("%d: REGISTER returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Releasing the bound interrupt while registered must be denied */
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret != -SMC_EDENY) {
+ printf("%d: RELEASE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Changing event routing while registered must be denied */
+ ret = sdei_event_routing_set(ev, SDEI_REGF_RM_PE);
+ if (ret != -SMC_EDENY) {
+ printf("%d: ROUTING_SET returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Handler-context calls are invalid outside a handler */
+ ret = sdei_event_context(0);
+ if (ret != -SMC_EDENY) {
+ printf("%d: EVENT_CONTEXT returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_complete(SDEI_EV_HANDLED);
+ if (ret != -SMC_EDENY) {
+ printf("%d: COMPLETE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_complete_and_resume(0);
+ if (ret != -SMC_EDENY) {
+ printf("%d: COMPLETE_AND_RESUME returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_unregister(ev);
+ if (ret < 0) {
+ printf("%d: SDEI unregister failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret < 0) {
+ printf("%d: SDEI interrupt release failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Run every SDEI state-machine sub-test in sequence on the calling CPU.
+ * Returns the result of the first failing sub-test, or TEST_RESULT_SUCCESS
+ * if all pass.
+ */
+static test_result_t iterate_state_machine(void)
+{
+ test_result_t ret;
+
+ printf("%d: Cranking SDEI state machine on %llx\n",
+ __LINE__, (unsigned long long) read_mpidr_el1());
+
+ ret = hdlr_unregistered();
+ if (ret != TEST_RESULT_SUCCESS) {
+ printf("%d: State test hdlr_unregistered failed\n", __LINE__);
+ return ret;
+ }
+
+ ret = hdlr_registered();
+ if (ret != TEST_RESULT_SUCCESS) {
+ printf("%d: State test hdlr_registered failed\n", __LINE__);
+ return ret;
+ }
+
+ ret = hdlr_registered_running();
+ if (ret != TEST_RESULT_SUCCESS) {
+ printf("%d: State test hdlr_registered_running failed\n",
+ __LINE__);
+ return ret;
+ }
+
+ ret = hdlr_unregistered_running();
+ if (ret != TEST_RESULT_SUCCESS) {
+ printf("%d: State test hdlr_unregistered_running failed\n",
+ __LINE__);
+ return ret;
+ }
+
+ ret = hdlr_enabled();
+ if (ret != TEST_RESULT_SUCCESS) {
+ printf("%d: State test hdlr_enabled failed\n", __LINE__);
+ return ret;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Have all CPUs run through the SDEI state machine.
+ * The lead CPU runs iterate_state_machine() directly; every other CPU is
+ * powered on with it as entry point, and the lead CPU waits for each to
+ * power off again before moving on.
+ */
+test_result_t test_sdei_state(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node;
+ int32_t aff_info __unused;
+ int64_t ret;
+
+ /* Skip the test altogether if the expected SDEI version isn't there */
+ ret = sdei_version();
+ if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+ printf("%d: Unexpected SDEI version: 0x%llx\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+ if (lead_mpid == target_mpid) {
+ /* Run on this CPU */
+ if (iterate_state_machine() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+ } else {
+ /* Power on other CPU to run through SDEI state machine */
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) iterate_state_machine, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long) target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait for other CPU to power down (busy poll) */
+ do {
+ aff_info = tftf_psci_affinity_info(target_mpid,
+ MPIDR_AFFLVL0);
+ } while (aff_info != PSCI_STATE_OFF);
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/unknown_smc.c b/tftf/tests/runtime_services/standard_service/unknown_smc.c
new file mode 100644
index 0000000..70175e3
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/unknown_smc.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <smccc.h>
+#include <std_svc.h>
+#include <tftf_lib.h>
+#include <uuid_utils.h>
+
+/* Invalid SMC FID: chose an identifier that falls in the reserved space */
+#define INVALID_FID (0x00ff0000 | (1u << 31))
+
+/**
+ * @Test_Aim@ Force an SMC_UNKNOWN return
+ *
+ * Issues an SMC with a function ID from the reserved space (INVALID_FID)
+ * and checks that the firmware answers with SMC_UNKNOWN.
+ */
+test_result_t test_unknown_smc(void)
+{
+ smc_args unk_smc;
+ smc_ret_values ret;
+
+ /* Only arg0 (the FID) matters for an unknown SMC */
+ unk_smc.arg0 = INVALID_FID;
+ ret = tftf_smc(&unk_smc);
+
+ if (ret.ret0 != SMC_UNKNOWN) {
+ tftf_testcase_printf("Expected SMC_UNKNOWN, got %ld\n",
+ (long int) ret.ret0);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_irq_preempted_std_smc.c b/tftf/tests/runtime_services/trusted_os/tsp/test_irq_preempted_std_smc.c
new file mode 100644
index 0000000..22e35db
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_irq_preempted_std_smc.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <events.h>
+#include <gic_common.h>
+#include <gic_v2.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <sgi.h>
+#include <smccc.h>
+#include <string.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#define TEST_ITERATIONS_COUNT 1000
+
+#define SUSPEND_TIME_1_SEC 1000
+
+#define TEST_VALUE_1 4
+#define TEST_VALUE_2 6
+
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+static event_t cpu_has_finished_test[PLATFORM_CORE_COUNT];
+
+static volatile int requested_irq_received[PLATFORM_CORE_COUNT];
+static volatile int wakeup_irq_received[PLATFORM_CORE_COUNT];
+static volatile int individual_test_failed[PLATFORM_CORE_COUNT];
+static volatile int pwr_level_being_tested;
+static volatile int test_finished_flag;
+
+/*
+ * Dummy timer handler that sets a flag to check it has been called.
+ * Records the wakeup in the per-core wakeup_irq_received slot; asserts it
+ * fires at most once per test round on each core.
+ */
+static int suspend_wakeup_handler(void *data)
+{
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ assert(wakeup_irq_received[core_pos] == 0);
+
+ wakeup_irq_received[core_pos] = 1;
+
+ return 0;
+}
+
+/*
+ * Dummy handler that sets a flag so as to check it has been called.
+ * Records the SGI delivery in the per-core requested_irq_received slot;
+ * asserts it fires at most once per test round on each core.
+ */
+static int test_handler(void *data)
+{
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ assert(requested_irq_received[core_pos] == 0);
+
+ requested_irq_received[core_pos] = 1;
+
+ return 0;
+}
+
+/*
+ * Register a dummy handler for SGI #0 and enable it. Returns 0 if success.
+ * core_pos is used only for the error message on registration failure.
+ */
+static int register_and_enable_test_sgi_handler(unsigned int core_pos)
+{
+ /* SGIs #0 - #6 are freely available. */
+
+ int ret = tftf_irq_register_handler(IRQ_NS_SGI_0, test_handler);
+
+ if (ret != 0) {
+ tftf_testcase_printf(
+ "Failed to register SGI handler @ CPU %d (rc = %d)\n",
+ core_pos, ret);
+ return -1;
+ }
+
+ tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+ return 0;
+}
+
+/* Disable and unregister the dummy handler for SGI #0 (reverse of
+ * register_and_enable_test_sgi_handler). */
+static void unregister_and_disable_test_sgi_handler(void)
+{
+ tftf_irq_disable(IRQ_NS_SGI_0);
+
+ tftf_irq_unregister_handler(IRQ_NS_SGI_0);
+}
+
+/*
+ * Generate a pre-empted STD SMC on the CPU who called this function. Steps:
+ * 1. IRQs are disabled.
+ * 2. An SGI is sent to itself. It cannot be handled because IRQs are disabled.
+ * 3. Invoke an STD SMC on the TSP, which is preempted by the pending SGI.
+ * 4. IRQs are enabled, the SGI is handled.
+ * 5. This function is exited with a preempted STD SMC waiting to be resumed.
+ *
+ * Returns TEST_RESULT_SUCCESS or TEST_RESULT_FAIL.
+ */
+static int preempt_std_smc_on_this_cpu(void)
+{
+ smc_args std_smc_args;
+ smc_ret_values smc_ret;
+
+ int result = TEST_RESULT_SUCCESS;
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ if (register_and_enable_test_sgi_handler(core_pos) != 0) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Set PSTATE.I to 0. */
+ disable_irq();
+
+ /*
+ * Send SGI to itself. It can't be handled because the
+ * interrupts are disabled.
+ */
+ requested_irq_received[core_pos] = 0;
+
+ tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
+
+ /*
+ * Invoke an STD SMC. Should be pre-empted because of the SGI
+ * that is waiting.
+ */
+ std_smc_args.arg0 = TSP_STD_FID(TSP_ADD);
+ std_smc_args.arg1 = TEST_VALUE_1;
+ std_smc_args.arg2 = TEST_VALUE_2;
+ smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != TSP_SMC_PREEMPTED) {
+ tftf_testcase_printf("SMC @ CPU %d returned 0x%llX.\n", core_pos,
+ (unsigned long long)smc_ret.ret0);
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Set PSTATE.I to 1. Let the SGI be handled. */
+ enable_irq();
+
+ /* Cleanup. Disable and unregister SGI handler. */
+ unregister_and_disable_test_sgi_handler();
+
+ /*
+ * Check that the SGI has been handled, but don't fail if it hasn't
+ * because there is no guarantee that it will have actually happened at
+ * this point.
+ */
+ if (requested_irq_received[core_pos] == 0) {
+ VERBOSE("SGI not handled @ CPU %d\n", core_pos);
+ }
+
+ return result;
+}
+
+/*
+ * Resume a pre-empted STD SMC on the CPU who called this function.
+ * Expects the TSP_ADD request issued by preempt_std_smc_on_this_cpu(), so
+ * the resumed call must return (0, 2*TEST_VALUE_1, 2*TEST_VALUE_2).
+ */
+static int resume_std_smc_on_this_cpu(void)
+{
+ smc_args std_smc_args;
+ smc_ret_values smc_ret;
+
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ /* Resume the STD SMC. Verify result. */
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret = tftf_smc(&std_smc_args);
+ if ((smc_ret.ret0 != 0) || (smc_ret.ret1 != TEST_VALUE_1 * 2)
+ || (smc_ret.ret2 != TEST_VALUE_2 * 2)) {
+ tftf_testcase_printf(
+ "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
+ core_pos, (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2,
+ TEST_VALUE_1 * 2, TEST_VALUE_2 * 2);
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Try to resume a pre-empted STD SMC on the CPU who called this function,
+ * but check for SMC_UNKNOWN as a result. Used where no preempted SMC should
+ * exist for this CPU (e.g. after a CPU OFF/ON cycle).
+ */
+static int resume_fail_std_smc_on_this_cpu(void)
+{
+ smc_args std_smc_args;
+ smc_ret_values smc_ret;
+
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ /* Resume the STD SMC. Verify result. */
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != SMC_UNKNOWN) {
+ tftf_testcase_printf(
+ "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of SMC_UNKNOWN\n",
+ core_pos, (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2);
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*******************************************************************************
+ * Test pre-emption during STD SMCs.
+ ******************************************************************************/
+
+/*
+ * Test routine for test_irq_preempted_std_smc.
+ * Signals entry to the lead CPU, then repeatedly preempts and resumes an
+ * STD SMC for TEST_ITERATIONS_COUNT iterations.
+ */
+static test_result_t test_irq_preempted_std_smc_fn(void)
+{
+ u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(cpu_mpid);
+
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+ for (unsigned int i = 0; i < TEST_ITERATIONS_COUNT; i++) {
+
+ if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ if (resume_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Multicore preemption test. Tests IRQ preemption during STD SMC
+ * from multiple cores. Uses an SGI to trigger the preemption. TSP should be
+ * present.
+ *
+ * Steps: 1. Invoke Standard SMC on the TSP and try to preempt it via IRQ.
+ * 2. Resume the preempted SMC and verify the result.
+ *
+ * Returns SUCCESS if above 2 steps are performed correctly in every CPU else
+ * failure.
+ */
+test_result_t test_irq_preempted_std_smc(void)
+{
+ u_register_t cpu_mpid;
+ unsigned int cpu_node, core_pos;
+ int psci_ret;
+ u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+ /* Reset the entry events before powering on the secondaries */
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ tftf_init_event(&cpu_has_entered_test[i]);
+ }
+
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_mpid) {
+ continue;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t)test_irq_preempted_std_smc_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU %d (rc = %d)\n",
+ core_pos, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Wait until all CPUs have started the test. */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid) {
+ continue;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+ }
+
+ /* Enter the test on lead CPU and return the result. */
+ return test_irq_preempted_std_smc_fn();
+}
+
+/*
+ * Test routine for non-lead CPUs for test_resume_preempted_std_smc_other_cpus.
+ * Verifies that a resume attempt from this CPU cannot pick up the STD SMC
+ * that was preempted on the lead CPU (SMC_UNKNOWN is expected).
+ */
+static test_result_t test_resume_preempted_std_smc_other_cpus_non_lead_fn(void)
+{
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /*
+ * Try to resume the STD SMC invoked from the lead CPU. It shouldn't be
+ * able to do it.
+ */
+
+ smc_args std_smc_args;
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret_values smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != SMC_UNKNOWN) {
+ tftf_testcase_printf(
+ "SMC @ lead CPU returned 0x%llX 0x%llX 0x%llX instead of SMC_UNKNOWN\n",
+ (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2);
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Signal to the lead CPU that the calling CPU has finished the test */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+
+ return result;
+}
+
+/*
+ * @Test_Aim@ Multicore preemption test. For a MP Secure Payload, the
+ * pre-emption on one CPU should not affect the other CPU. Trying to resume
+ * one STD SMC that was preempted on one CPU shouldn't be possible from any
+ * other CPU.
+ *
+ * Steps: 1. Issue Standard SMC and try preempting it via IRQ on lead CPU.
+ * 2. Try to resume it from the rest of the CPUs sequentially.
+ * 3. Resume the preempted SMC from the lead CPU and verify the result.
+ *
+ * Returns SUCCESS if step 2 fails and steps 1 and 3 succeed, else failure.
+ */
+test_result_t test_resume_preempted_std_smc_other_cpus(void)
+{
+ int i;
+ u_register_t cpu_mpid;
+ unsigned int cpu_node, core_pos;
+ int psci_ret;
+
+ u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+ /*
+ * Invoke a STD SMC that will be pre-empted.
+ */
+ if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Try to resume the STD SMC from the rest of CPUs. It shouldn't be
+ * possible.
+ */
+
+ for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ tftf_init_event(&cpu_has_finished_test[i]);
+ }
+
+ /* Power on all CPUs and perform test sequentially. */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it's the one with the pre-empted STD SMC. */
+ if (cpu_mpid == lead_mpid) {
+ continue;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t)test_resume_preempted_std_smc_other_cpus_non_lead_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU %d (rc = %d)\n",
+ core_pos, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait until the test is finished to begin with the next CPU. */
+ tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
+ }
+
+ /*
+ * Try to resume the STD SMC from the lead CPU. It should be able to do
+ * it and to return the correct result.
+ */
+ return resume_std_smc_on_this_cpu();
+}
+
+/*
+ * Test routine for secondary CPU for test_resume_different_cpu_preempted_std_smc.
+ * Preempts a TSP_MUL STD SMC on this CPU (distinct from the lead CPU's
+ * TSP_ADD one), resumes it and verifies the squared results, then checks
+ * that a second resume cannot pick up the lead CPU's preempted SMC.
+ */
+static test_result_t test_resume_different_cpu_preempted_std_smc_non_lead_fn(void)
+{
+ smc_args std_smc_args;
+ smc_ret_values smc_ret;
+
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Signal to the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+ /* Register and enable SGI. SGIs #0 - #6 are freely available. */
+ if (register_and_enable_test_sgi_handler(core_pos) != 0) {
+ /* Signal to the lead CPU that the calling CPU has finished */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Set PSTATE.I to 0. */
+ disable_irq();
+
+ /*
+ * Send SGI to itself. It can't be handled because the interrupts are
+ * disabled.
+ */
+ requested_irq_received[core_pos] = 0;
+
+ tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
+
+ /*
+ * Invoke an STD SMC. Should be pre-empted because of the SGI that is
+ * waiting. It has to be different than the one invoked from the lead
+ * CPU.
+ */
+ std_smc_args.arg0 = TSP_STD_FID(TSP_MUL);
+ std_smc_args.arg1 = TEST_VALUE_1;
+ std_smc_args.arg2 = TEST_VALUE_2;
+ smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != TSP_SMC_PREEMPTED) {
+ tftf_testcase_printf(
+ "SMC @ CPU %d returned 0x%llX instead of TSP_SMC_PREEMPTED.\n",
+ core_pos, (unsigned long long)smc_ret.ret0);
+ enable_irq();
+ unregister_and_disable_test_sgi_handler();
+ /* Signal to the lead CPU that the calling CPU has finished */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Set PSTATE.I to 1. Let the SGI be handled. */
+ enable_irq();
+
+ /* Cleanup. Disable and unregister SGI handler. */
+ unregister_and_disable_test_sgi_handler();
+
+ /*
+ * Check that the SGI has been handled, but don't fail if it hasn't
+ * because there is no guarantee that it will have actually happened at
+ * this point.
+ */
+ if (requested_irq_received[core_pos] == 0) {
+ VERBOSE("SGI not handled @ CPU %d\n", core_pos);
+ }
+
+ /*
+ * Resume the STD SMC. Verify result: TSP_MUL squares its arguments, so
+ * the error message must quote the squared values, not the doubles.
+ */
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret = tftf_smc(&std_smc_args);
+ if ((smc_ret.ret0 != 0) || (smc_ret.ret1 != TEST_VALUE_1*TEST_VALUE_1)
+ || (smc_ret.ret2 != TEST_VALUE_2*TEST_VALUE_2)) {
+ tftf_testcase_printf(
+ "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
+ core_pos, (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2,
+ TEST_VALUE_1*TEST_VALUE_1, TEST_VALUE_2*TEST_VALUE_2);
+ /* Signal to the lead CPU that the calling CPU has finished */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Try to resume the lead CPU STD SMC. Verify result. */
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != SMC_UNKNOWN) {
+ tftf_testcase_printf(
+ "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of SMC_UNKNOWN\n",
+ core_pos, (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2);
+ /* Signal to the lead CPU that the calling CPU has finished */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Signal to the lead CPU that the calling CPU has finished the test */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Multicore preemption test. For a MP Secure Payload, the
+ * pre-emption on one CPU should not affect the other CPU. Trying to resume
+ * one STD SMC pre-empted on one CPU shouldn't be possible from any other CPU
+ * involved in the test, and the STD SMC that is resumed from each CPU should
+ * be the same one that was invoked from it.
+ *
+ * Steps: 1. Lead and secondary CPUs set different preempted STD SMCs.
+ * 2. Resume the preempted SMC from secondary CPU. Verify the result.
+ * 3. Try to resume again to check if it can resume the lead SMC.
+ * 4. Resume the preempted SMC from lead CPU. Verify the result.
+ *
+ * Returns SUCCESS if steps 1, 2 and 4 succeed and step 3 fails, else failure.
+ */
+test_result_t test_resume_different_cpu_preempted_std_smc(void)
+{
+ smc_args std_smc_args;
+ smc_ret_values smc_ret;
+ u_register_t cpu_mpid;
+ unsigned int core_pos;
+ int psci_ret;
+
+ u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int lead_pos = platform_get_core_pos(lead_mpid);
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+ /*
+ * Generate a SGI on the lead CPU that can't be handled because the
+ * interrupts are disabled. The helper takes a core position (not an
+ * MPIDR) and its result must be checked.
+ */
+ if (register_and_enable_test_sgi_handler(lead_pos) != 0) {
+ return TEST_RESULT_FAIL;
+ }
+ disable_irq();
+
+ requested_irq_received[lead_pos] = 0;
+
+ tftf_send_sgi(IRQ_NS_SGI_0, lead_pos);
+
+ /*
+ * Invoke an STD SMC. Should be pre-empted because of the SGI that is
+ * waiting.
+ */
+ std_smc_args.arg0 = TSP_STD_FID(TSP_ADD);
+ std_smc_args.arg1 = TEST_VALUE_1;
+ std_smc_args.arg2 = TEST_VALUE_2;
+ smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != TSP_SMC_PREEMPTED) {
+ tftf_testcase_printf(
+ "SMC @ lead CPU returned 0x%llX instead of TSP_SMC_PREEMPTED.\n",
+ (unsigned long long)smc_ret.ret0);
+ enable_irq();
+ unregister_and_disable_test_sgi_handler();
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Set PSTATE.I to 1. Let the SGI be handled. */
+ enable_irq();
+
+ /* Cleanup. Disable and unregister SGI handler. */
+ unregister_and_disable_test_sgi_handler();
+
+ /*
+ * Check that the SGI has been handled, but don't fail if it hasn't
+ * because there is no guarantee that it will have actually happened at
+ * this point.
+ */
+ if (requested_irq_received[lead_pos] == 0) {
+ VERBOSE("SGI not handled @ lead CPU.\n");
+ }
+
+ /* Generate a preempted SMC in a secondary CPU. */
+ cpu_mpid = tftf_find_any_cpu_other_than(lead_mpid);
+ if (cpu_mpid == INVALID_MPID) {
+ tftf_testcase_printf("Couldn't find another CPU.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_init_event(&cpu_has_finished_test[core_pos]);
+
+ psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t)
+ test_resume_different_cpu_preempted_std_smc_non_lead_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU %d (rc = %d)\n",
+ core_pos, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait until the test is finished to continue. */
+ tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
+
+ /*
+ * Try to resume the STD SMC from the lead CPU. It should be able resume
+ * the one it generated before and to return the correct result.
+ */
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret = tftf_smc(&std_smc_args);
+ if ((smc_ret.ret0 != 0) || (smc_ret.ret1 != TEST_VALUE_1 * 2) ||
+ (smc_ret.ret2 != TEST_VALUE_2 * 2)) {
+ tftf_testcase_printf(
+ "SMC @ lead CPU returned 0x%llX 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
+ (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2,
+ TEST_VALUE_1*2, TEST_VALUE_2*2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*******************************************************************************
+ * Test PSCI APIs while preempted.
+ ******************************************************************************/
+
+/*
+ * First part of the test routine for test_psci_cpu_on_off_preempted.
+ * Prepare a pre-empted STD SMC, then return so the framework can power
+ * this CPU off; part 2 runs after the CPU is powered back on.
+ */
+static test_result_t test_psci_cpu_on_off_preempted_non_lead_fn_1(void)
+{
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Signal to the lead CPU that the calling CPU has entered the test
+ * conditions for the second part.
+ */
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+ /*
+ * Now this CPU has to be turned off. Since this is not a lead CPU, it
+ * will be done in run_tests(). If it was done here, cpus_cnt wouldn't
+ * decrement and the tftf would think there is still a CPU running, so
+ * it wouldn't finish.
+ *
+ * The result will be overwritten when the second part of the test is
+ * executed.
+ */
+ return result;
+}
+
+/*
+ * Second part of the test routine for test_psci_cpu_on_off_preempted.
+ * Try to resume the previously pre-empted STD SMC; after the CPU OFF/ON
+ * cycle the resume must fail with SMC_UNKNOWN.
+ */
+static test_result_t test_psci_cpu_on_off_preempted_non_lead_fn_2(void)
+{
+ test_result_t result;
+
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Try to resume the STD SMC. Check that it fails. */
+ result = resume_fail_std_smc_on_this_cpu();
+
+ /* Signal to the lead CPU that the calling CPU has finished the test */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+
+ return result;
+}
+
+/*
+ * @Test_Aim@ Resume preempted STD SMC after PSCI CPU OFF/ON cycle.
+ *
+ * Steps: 1. Each CPU sets a preempted STD SMC.
+ * 2. They send an event to the lead CPU and call PSCI CPU OFF.
+ * 3. The lead CPU invokes PSCI CPU ON for the secondaries (warm boot).
+ * 4. Try to resume the preempted STD SMC on secondary CPUs.
+ *
+ * Returns SUCCESS if steps 1, 2 or 3 succeed and step 4 fails, else failure.
+ */
+test_result_t test_psci_cpu_on_off_preempted_std_smc(void)
+{
+	int i;
+	int all_powered_down;
+	u_register_t cpu_mpid;
+	unsigned int cpu_node, core_pos;
+	int psci_ret;
+	u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Reset the handshake events used by both parts of the test. */
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+		tftf_init_event(&cpu_has_entered_test[i]);
+		tftf_init_event(&cpu_has_finished_test[i]);
+	}
+
+	/* Power on all CPUs. Each one leaves a preempted STD SMC pending. */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already powered on */
+		if (cpu_mpid == lead_mpid) {
+			continue;
+		}
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+
+		psci_ret = tftf_cpu_on(cpu_mpid,
+			(uintptr_t)test_psci_cpu_on_off_preempted_non_lead_fn_1, 0);
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("Failed to power on CPU %d (rc = %d)\n",
+				core_pos, psci_ret);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Wait for non-lead CPUs to exit the first part of the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid) {
+			continue;
+		}
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+	}
+
+	/*
+	 * Check that all secondary CPUs are powered off. The secondaries are
+	 * turned off by the framework after returning from the first part,
+	 * so poll until none of them is reported online any more.
+	 */
+	all_powered_down = 0;
+	while (all_powered_down == 0) {
+		all_powered_down = 1;
+		for_each_cpu(cpu_node) {
+			cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+			if (cpu_mpid == lead_mpid) {
+				continue;
+			}
+			if (tftf_is_cpu_online(cpu_mpid) != 0) {
+				all_powered_down = 0;
+			}
+		}
+	}
+
+	/* Start the second part of the test: warm-boot the secondaries. */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already powered on */
+		if (cpu_mpid == lead_mpid) {
+			continue;
+		}
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+
+		psci_ret = tftf_cpu_on(cpu_mpid,
+			(uintptr_t)test_psci_cpu_on_off_preempted_non_lead_fn_2, 0);
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("Failed to power on CPU 0x%x (rc = %d)\n",
+				core_pos, psci_ret);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Wait for non-lead CPUs to finish the second part of the test. */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid) {
+			continue;
+		}
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/******************************************************************************/
+
+/*
+ * @Test_Aim@ Resume preempted STD SMC after PSCI SYSTEM SUSPEND (in case it is
+ * supported).
+ *
+ * Steps: 1. The lead CPU sets a preempted STD SMC.
+ * 2. It calls PSCI SYSTEM SUSPEND with a wakeup timer for 1 sec.
+ * 3. Try to resume the preempted STD SMC.
+ *
+ * Returns SUCCESS if steps 1 and 2 succeed and step 3 fails.
+ */
+test_result_t test_psci_system_suspend_preempted_std_smc(void)
+{
+	int psci_ret;
+	int result = TEST_RESULT_SUCCESS;
+
+	u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int lead_pos = platform_get_core_pos(lead_mpid);
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* SYSTEM_SUSPEND is an optional PSCI call; skip if unimplemented. */
+	if (!is_psci_sys_susp_supported()) {
+		tftf_testcase_printf(
+			"SYSTEM_SUSPEND is not supported.\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Leave an STD SMC in the preempted state on the lead CPU. */
+	if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * NOTE(review): presumably this checks that the system is in a state
+	 * where SYSTEM_SUSPEND is allowed (e.g. other CPUs off) — confirm.
+	 * The suspend is still attempted, but the test is flagged as failed.
+	 */
+	if (!is_sys_suspend_state_ready()) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Prepare wakeup timer. IRQs need to be enabled. */
+	wakeup_irq_received[lead_pos] = 0;
+
+	tftf_timer_register_handler(suspend_wakeup_handler);
+
+	/* Program timer to fire interrupt after timer expires */
+	tftf_program_timer(SUSPEND_TIME_1_SEC);
+
+	/* Issue PSCI_SYSTEM_SUSPEND. */
+	psci_ret = tftf_system_suspend();
+
+	/* Busy-wait until the wakeup interrupt has actually been handled. */
+	while (!wakeup_irq_received[lead_pos])
+		;
+
+	if (psci_ret != PSCI_E_SUCCESS) {
+		mp_printf("SYSTEM_SUSPEND from lead CPU failed. ret: 0x%x\n",
+			psci_ret);
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Remove timer after waking up.*/
+	tftf_cancel_timer();
+	tftf_timer_unregister_handler();
+
+	/*
+	 * Try to resume the preempted STD SMC and check that it fails: per
+	 * the test aim, the context is not expected to survive the suspend.
+	 */
+	if (resume_fail_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	return result;
+}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_irq_spurious_gicv2.c b/tftf/tests/runtime_services/trusted_os/tsp/test_irq_spurious_gicv2.c
new file mode 100644
index 0000000..fd6a14a
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_irq_spurious_gicv2.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <events.h>
+#include <gic_v2.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#define TEST_VALUE_1 4
+#define TEST_VALUE_2 6
+
+#define TEST_SPURIOUS_ITERATIONS_COUNT 1000000
+
+#define TEST_SPI_ID (MIN_SPI_ID + 2)
+
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+static volatile int requested_irq_received[PLATFORM_CORE_COUNT];
+static volatile int test_finished_flag;
+
+static volatile int spurious_count[PLATFORM_CORE_COUNT];
+static volatile int preempted_count[PLATFORM_CORE_COUNT];
+
+/* Dummy handler that sets a flag so as to check it has been called. */
+static int test_handler(void *data)
+{
+	/* Linear position of the CPU that took the interrupt. */
+	unsigned int pos =
+		platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+	/* The SPI must not have been flagged on this CPU already. */
+	assert(requested_irq_received[pos] == 0);
+
+	/* Record that this CPU won the race to handle the SPI. */
+	requested_irq_received[pos] = 1;
+
+	return 0;
+}
+
+/* Dummy handler that increases a variable to check if it has been called. */
+/* Dummy handler that increases a variable to check if it has been called. */
+static int test_spurious_handler(void *data)
+{
+	/*
+	 * Mask the MPIDR before converting it to a linear core position,
+	 * consistently with test_handler() above (the raw MPIDR may carry
+	 * bits outside MPID_MASK).
+	 */
+	unsigned int core_pos =
+		platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+	/* Count spurious interrupts taken on this CPU. */
+	spurious_count[core_pos]++;
+
+	return 0;
+}
+
+/* Helper function for test_juno_multicore_spurious_interrupt. */
+/*
+ * Helper function for test_juno_multicore_spurious_interrupt. Runs on every
+ * secondary CPU: loops issuing STD SMCs to the TSP (resuming them whenever
+ * they are preempted) until the lead CPU sets test_finished_flag.
+ */
+static test_result_t test_juno_multicore_spurious_interrupt_non_lead_fn(void)
+{
+	test_result_t result = TEST_RESULT_SUCCESS;
+	u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(mpid);
+
+	/* Signal to the lead CPU that the calling CPU has entered the test */
+	tftf_send_event(&cpu_ready[core_pos]);
+
+	while (test_finished_flag == 0) {
+
+		smc_args std_smc_args;
+		smc_ret_values smc_ret;
+
+		/* Invoke an STD SMC. */
+		std_smc_args.arg0 = TSP_STD_FID(TSP_ADD);
+		std_smc_args.arg1 = TEST_VALUE_1;
+		std_smc_args.arg2 = TEST_VALUE_2;
+		smc_ret = tftf_smc(&std_smc_args);
+
+		/* Resume the SMC until it completes or returns an error. */
+		while (result != TEST_RESULT_FAIL) {
+			if (smc_ret.ret0 == 0) {
+				/* Verify result */
+				if ((smc_ret.ret1 != TEST_VALUE_1 * 2) ||
+				    (smc_ret.ret2 != TEST_VALUE_2 * 2)) {
+					tftf_testcase_printf(
+					"SMC @ CPU %d returned 0x0 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
+					core_pos,
+					(unsigned long long)smc_ret.ret1,
+					(unsigned long long)smc_ret.ret2,
+					TEST_VALUE_1 * 2, TEST_VALUE_2 * 2);
+					result = TEST_RESULT_FAIL;
+				} else {
+					/* Correct, exit inner loop */
+					break;
+				}
+			} else if (smc_ret.ret0 == TSP_SMC_PREEMPTED) {
+				/* Resume the STD SMC. */
+				std_smc_args.arg0 = TSP_FID_RESUME;
+				smc_ret = tftf_smc(&std_smc_args);
+				preempted_count[core_pos]++;
+			} else {
+				/*
+				 * Unexpected return value. This code runs on
+				 * the secondary CPUs, so report the CPU
+				 * position rather than claiming "lead CPU".
+				 */
+				tftf_testcase_printf(
+					"SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX\n",
+					core_pos,
+					(unsigned long long)smc_ret.ret0,
+					(unsigned long long)smc_ret.ret1,
+					(unsigned long long)smc_ret.ret2);
+				mp_printf("Panic <others> %d\n", core_pos);
+				result = TEST_RESULT_FAIL;
+			}
+		}
+
+		if (result == TEST_RESULT_FAIL)
+			break;
+	}
+
+	/* Signal to the lead CPU that the calling CPU has finished the test */
+	tftf_send_event(&cpu_ready[core_pos]);
+
+	return result;
+}
+
+/*
+ * @Test_Aim@ Test Spurious interrupt handling. GICv2 only. Only works if TF
+ * is compiled with TSP_NS_INTR_ASYNC_PREEMPT = 0.
+ *
+ * Steps: 1. Setup SPI handler and spurious interrupt handler on the lead CPU.
+ * 2. Redirect SPI interrupts to all CPUs.
+ * 3. Turn on secondary CPUs and make them invoke STD SMC all time.
+ * 4. The lead CPU starts a loop that triggers a SPI so that all CPUs
+ * will try to handle it.
+ * 5. The CPUs that can't handle the SPI will receive a spurious
+ * interrupt and increase a counter.
+ * 6. Check that there have been spurious interrupts. Not necessarily
+ * the number of (CPU - 1) * iterations as the SMC may need time to
+ * handle.
+ *
+ * Returns SUCCESS if all steps succeed, else failure.
+ */
+test_result_t test_juno_multicore_spurious_interrupt(void)
+{
+	int i, j;
+	u_register_t cpu_mpid;
+	unsigned int cpu_node, core_pos;
+	int psci_ret;
+	int ret;
+
+	u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	ret = tftf_irq_register_handler(GIC_SPURIOUS_INTERRUPT,
+					test_spurious_handler);
+	if (ret != 0) {
+		tftf_testcase_printf(
+			"Failed to register spurious handler. Error = %d\n",
+			ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Reset test variables and boot secondary cores. */
+
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+		spurious_count[i] = 0;
+		preempted_count[i] = 0;
+	}
+
+	test_finished_flag = 0;
+
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+		tftf_init_event(&cpu_ready[i]);
+
+	/* Make sure the cleared flags are visible before starting CPUs. */
+	dsbsy();
+
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		psci_ret = tftf_cpu_on(cpu_mpid,
+			(uintptr_t)test_juno_multicore_spurious_interrupt_non_lead_fn, 0);
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf(
+				"Failed to power on CPU 0x%x (%d)\n",
+				(unsigned int)cpu_mpid, psci_ret);
+			/* Let any already-started secondary exit its loop. */
+			test_finished_flag = 1;
+			/* Don't leak the spurious interrupt handler. */
+			tftf_irq_unregister_handler(GIC_SPURIOUS_INTERRUPT);
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for non-lead CPUs to enter the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+		/*
+		 * Re-arm this CPU's event for the end-of-test handshake.
+		 * Bug fix: the original indexed with the stale counter 'i'
+		 * (left at PLATFORM_CORE_COUNT by the loops above) instead of
+		 * 'core_pos'. Re-initialising here is safe because the
+		 * secondaries do not signal again until test_finished_flag
+		 * is set.
+		 */
+		tftf_init_event(&cpu_ready[core_pos]);
+	}
+
+	/* Wait until tftf_init_event is seen by all cores */
+	dsbsy();
+
+	/* Register SPI handler (for all CPUs) and enable it. */
+	ret = tftf_irq_register_handler(TEST_SPI_ID, test_handler);
+	if (ret != 0) {
+		tftf_testcase_printf(
+			"Failed to register SPI handler @ lead CPU. Error code = %d\n",
+			ret);
+		tftf_irq_unregister_handler(GIC_SPURIOUS_INTERRUPT);
+		test_finished_flag = 1;
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Enable IRQ and route it to this CPU. */
+	tftf_irq_enable(TEST_SPI_ID, GIC_HIGHEST_NS_PRIORITY);
+
+	/* Route interrupts to all CPUs */
+	gicv2_set_itargetsr_value(TEST_SPI_ID, 0xFF);
+
+	for (j = 0; j < TEST_SPURIOUS_ITERATIONS_COUNT; j++) {
+
+		/* Clear handled flags. */
+		for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+			requested_irq_received[i] = 0;
+		dsbsy();
+
+		/* Request SPI */
+		gicv2_gicd_set_ispendr(TEST_SPI_ID);
+
+		/* Wait until it is handled. */
+		int wait = 1;
+
+		while (wait) {
+			for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+				if (requested_irq_received[i])
+					wait = 0;
+			}
+		}
+	}
+
+	test_finished_flag = 1;
+
+	/* Wait for non-lead CPUs to finish the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+	}
+
+	/* Cleanup. */
+	tftf_irq_disable(TEST_SPI_ID);
+
+	tftf_irq_unregister_handler(TEST_SPI_ID);
+	tftf_irq_unregister_handler(GIC_SPURIOUS_INTERRUPT);
+
+	/* Check results. */
+	unsigned int total_spurious_count = 0;
+	unsigned int total_preempted_count = 0;
+
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+		total_spurious_count += spurious_count[i];
+		total_preempted_count += preempted_count[i];
+	}
+
+	/* Check that the test has tested the behaviour. */
+	if (total_spurious_count == 0) {
+		tftf_testcase_printf("No spurious interrupts were handled.\n"
+			"The TF-A must be compiled with TSP_NS_INTR_ASYNC_PREEMPT = 0\n");
+		/*
+		 * Don't flag the test as failed in case the TF-A was compiled
+		 * with TSP_NS_INTR_ASYNC_PREEMPT=1.
+		 */
+		return TEST_RESULT_SKIPPED;
+	}
+
+	if (total_preempted_count == 0) {
+		tftf_testcase_printf("No preempted STD SMCs.\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_normal_int_switch.c b/tftf/tests/runtime_services/trusted_os/tsp/test_normal_int_switch.c
new file mode 100644
index 0000000..c222389
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_normal_int_switch.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <irq.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <sgi.h>
+#include <smccc.h>
+#include <std_svc.h>
+#include <stdlib.h>
+#include <string.h>
+#include <test_helpers.h>
+#include <tftf.h>
+
+#define STRESS_COUNT 100
+
+/*
+ * The shared data between the handler and the
+ * preempt_tsp_via_SGI routine.
+ */
+typedef struct {
+ smc_ret_values tsp_result;
+ int wait_for_fiq;
+} irq_handler_shared_data;
+
+static irq_handler_shared_data shared_data;
+
+/*
+ * Handler for the SGI #0.
+ */
+static int sgi_handler(void *data)
+{
+	/* Ensure this is the SGI we expect. */
+	assert(IRQ_NS_SGI_0 == *(unsigned int *)data);
+
+	/*
+	 * When requested, park in WFI inside the handler so that the secure
+	 * timer FIQ can interrupt it; the FIQ wakes the core back up.
+	 */
+	if (shared_data.wait_for_fiq != 0)
+		wfi();
+
+	return 0;
+}
+
+/*
+ * This routine issues a SGI with interrupts disabled to make sure that the
+ * pending SGI will preempt a STD SMC.
+ */
+static test_result_t preempt_tsp_via_SGI(const smc_args *tsp_svc_params,
+					  int hold_irq_handler_for_fiq)
+{
+	int rc;
+	unsigned int core_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(core_mpid);
+	test_result_t result = TEST_RESULT_SUCCESS;
+
+	/* Reset the data shared with the SGI handler. */
+	memset(&shared_data, 0, sizeof(shared_data));
+
+	if (hold_irq_handler_for_fiq)
+		shared_data.wait_for_fiq = 1;
+
+	/* Register Handler for the interrupt. SGIs #0 - #6 are available. */
+	rc = tftf_irq_register_handler(IRQ_NS_SGI_0, sgi_handler);
+	if (rc != 0) {
+		tftf_testcase_printf("Failed to register SGI handler. "
+				"Error code = %d\n", rc);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Enable SGI #0 */
+	tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+	/* Mask IRQs at the PE (sets PSTATE.I; IRQs set pending, not taken). */
+	disable_irq();
+
+	/*
+	 * Send SGI to the current CPU. It can't be handled because the
+	 * interrupts are disabled.
+	 */
+	tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
+
+	/*
+	 * Invoke an STD SMC. Should be pre-empted because of the SGI that is
+	 * waiting.
+	 */
+	shared_data.tsp_result = tftf_smc(tsp_svc_params);
+	if (shared_data.tsp_result.ret0 != TSP_SMC_PREEMPTED) {
+		tftf_testcase_printf("SMC returned 0x%llX instead of "
+				"TSP_SMC_PREEMPTED.\n",
+				(unsigned long long)shared_data.tsp_result.ret0);
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Unmask IRQs (clears PSTATE.I). The SGI will be handled after this. */
+	enable_irq();
+
+	/* Disable SGI #0 */
+	tftf_irq_disable(IRQ_NS_SGI_0);
+
+	/* Unregister handler */
+	rc = tftf_irq_unregister_handler(IRQ_NS_SGI_0);
+	if (rc != 0) {
+		tftf_testcase_printf("Failed to unregister IRQ handler. "
+				"Error code = %d\n", rc);
+		result = TEST_RESULT_FAIL;
+	}
+
+	return result;
+}
+
+/*
+ * @Test_Aim@ Test the secure world preemption by non secure interrupt.
+ *
+ * Steps: 1. Issue Standard SMC and preempt it via SGI
+ * 2. Resume the preempted SMC
+ * Returns SUCCESS if above 2 steps are performed correctly else failure.
+ */
+/*
+ * Helper for tsp_int_and_resume(): issue the given STD SMC, preempt it via
+ * an SGI, resume it, and check that the resumed operation completes with
+ * {0, expected1, expected2}.
+ */
+static test_result_t preempt_resume_and_check(unsigned long long fid,
+		unsigned long long arg1, unsigned long long arg2,
+		unsigned long long expected1, unsigned long long expected2)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result;
+	test_result_t res;
+
+	/* Standard SMC, preempted by a pending SGI before it completes. */
+	tsp_svc_params.arg0 = fid;
+	tsp_svc_params.arg1 = arg1;
+	tsp_svc_params.arg2 = arg2;
+	res = preempt_tsp_via_SGI(&tsp_svc_params, 0);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Now that we have ensured preemption, issue RESUME */
+	tsp_svc_params.arg0 = TSP_FID_RESUME;
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* Check the result of the resumed TSP operation. */
+	if (tsp_result.ret0 != 0 || tsp_result.ret1 != expected1 ||
+	    tsp_result.ret2 != expected2) {
+		tftf_testcase_printf("SMC resume returned wrong result:"
+				"got %d %d %d expected: 0 %d %d\n",
+				(unsigned int)tsp_result.ret0,
+				(unsigned int)tsp_result.ret1,
+				(unsigned int)tsp_result.ret2,
+				(unsigned int)expected1,
+				(unsigned int)expected2);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+test_result_t tsp_int_and_resume(void)
+{
+	test_result_t res;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Addition: TSP doubles each argument (4 -> 8, 6 -> 12). */
+	res = preempt_resume_and_check(TSP_STD_FID(TSP_ADD), 4, 6, 8, 12);
+	if (res != TEST_RESULT_SUCCESS)
+		return res;
+
+	/* Subtraction: each argument minus itself (-> 0, 0). */
+	res = preempt_resume_and_check(TSP_STD_FID(TSP_SUB), 4, 6, 0, 0);
+	if (res != TEST_RESULT_SUCCESS)
+		return res;
+
+	/* Multiplication: each argument squared (-> 16, 36). */
+	res = preempt_resume_and_check(TSP_STD_FID(TSP_MUL), 4, 6, 16, 36);
+	if (res != TEST_RESULT_SUCCESS)
+		return res;
+
+	/* Division: each argument divided by itself (-> 1, 1). */
+	res = preempt_resume_and_check(TSP_STD_FID(TSP_DIV), 4, 6, 1, 1);
+	if (res != TEST_RESULT_SUCCESS)
+		return res;
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Verify Fast SMC request on an interrupted tsp returns error.
+ *
+ * Steps: 1. Issue Standard SMC and preempt it via SGI
+ * 2. Issue Fast SMC, this is not expected and TSP should return error.
+ * 3. Resume the preempted SMC and verify the result.
+ * Returns SUCCESS if above 3 steps are performed correctly else failure.
+ */
+test_result_t test_fast_smc_when_tsp_preempted(void)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result = {0};
+	test_result_t res = TEST_RESULT_SUCCESS;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Standard SMC, left in the preempted state by a pending SGI. */
+	tsp_svc_params.arg0 = TSP_STD_FID(TSP_ADD);
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+	res = preempt_tsp_via_SGI(&tsp_svc_params, 0);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Now that we have ensured preemption, issue Fast SMC */
+	tsp_svc_params.arg0 = TSP_FAST_FID(TSP_ADD);
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* A fast SMC while an STD SMC is preempted must be rejected. */
+	if (tsp_result.ret0 != SMC_UNKNOWN) {
+		tftf_testcase_printf("Fast SMC should not execute"
+				"while SMC is preempted\n");
+		res = TEST_RESULT_FAIL;
+	}
+
+	/* Issue RESUME to complete the original preempted STD SMC. */
+	tsp_svc_params.arg0 = TSP_FID_RESUME;
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* Check the result of the addition */
+	if (tsp_result.ret0 != 0 || tsp_result.ret1 != 8 ||
+	    tsp_result.ret2 != 12) {
+		tftf_testcase_printf("SMC resume returned wrong result:"
+				"got %d %d %d expected: 0 8 12\n",
+				(unsigned int)tsp_result.ret0,
+				(unsigned int)tsp_result.ret1,
+				(unsigned int)tsp_result.ret2);
+
+		res = TEST_RESULT_FAIL;
+	}
+
+	return res;
+}
+
+/*
+ * @Test_Aim@ Test the Standard SMC when tsp is pre-empted by interrupt.
+ *
+ * Steps:
+ * 1. Issue Standard SMC and preempt it via SGI
+ * 2. Issue another Standard SMC. this is not expected and TSP should return
+ * error.
+ * 3. Resume the preempted SMC or abort if the parameter `abort_smc` is set to
+ * 1.
+ * 4. Check the result if the SMC was resumed, or just carry on if it was
+ * aborted.
+ * Returns SUCCESS if above 4 steps are performed correctly else failure.
+ */
+static test_result_t test_std_smc_when_tsp_preempted(int abort_smc)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result = {0};
+	test_result_t res = TEST_RESULT_SUCCESS;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Standard SMC, left in the preempted state by a pending SGI. */
+	tsp_svc_params.arg0 = TSP_STD_FID(TSP_ADD);
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+	res = preempt_tsp_via_SGI(&tsp_svc_params, 0);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Now that we have ensured preemption, issue Standard SMC */
+	tsp_svc_params.arg0 = TSP_STD_FID(TSP_ADD);
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* A new STD SMC while one is already preempted must be rejected. */
+	if (tsp_result.ret0 != SMC_UNKNOWN) {
+		tftf_testcase_printf("Standard SMC should not execute while SMC is preempted\n");
+		res = TEST_RESULT_FAIL;
+	}
+
+	/* Issue ABORT or RESUME, depending on the requested variant. */
+	tsp_svc_params.arg0 = abort_smc ? TSP_FID_ABORT : TSP_FID_RESUME;
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/*
+	 * There is no way to check that the ABORT succeeded or failed because
+	 * it will return SMC_UNKNOWN in both cases.
+	 */
+	if (!abort_smc) {
+		/*
+		 * Check the result of the addition if we issued RESUME.
+		 */
+		if (tsp_result.ret0 != 0 || tsp_result.ret1 != 8 ||
+		    tsp_result.ret2 != 12) {
+			tftf_testcase_printf("SMC resume returned wrong result: got %d %d %d expected: 0 8 12\n",
+					(unsigned int)tsp_result.ret0,
+					(unsigned int)tsp_result.ret1,
+					(unsigned int)tsp_result.ret2);
+			res = TEST_RESULT_FAIL;
+		}
+	}
+
+	return res;
+}
+
+/* Variant that RESUMEs the preempted STD SMC and checks its result. */
+test_result_t test_std_smc_when_tsp_preempted_resume(void)
+{
+	return test_std_smc_when_tsp_preempted(0);
+}
+
+/* Variant that ABORTs the preempted STD SMC instead of resuming it. */
+test_result_t test_std_smc_when_tsp_preempted_abort(void)
+{
+	return test_std_smc_when_tsp_preempted(1);
+}
+
+/*
+ * @Test_Aim@ Test RESUME SMC call when TSP is not preempted. RESUME should fail.
+ *
+ * Issues resume SMC. This is not expected by TSP and returns error.
+ * This is a negative test, Return SUCCESS is RESUME returns SMC_UNKNOWN
+ */
+test_result_t test_resume_smc_without_preemption(void)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result = {0};
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Issue RESUME with no preempted STD SMC outstanding. */
+	tsp_svc_params.arg0 = TSP_FID_RESUME;
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* The dispatcher must reject the bogus resume request. */
+	if (tsp_result.ret0 == SMC_UNKNOWN)
+		return TEST_RESULT_SUCCESS;
+
+	tftf_testcase_printf("SMC Resume should return UNKNOWN, got:%d\n",
+			(unsigned int)tsp_result.ret0);
+	return TEST_RESULT_FAIL;
+}
+
+/*
+ * @Test_Aim@ Stress Test the secure world preemption by non secure interrupt
+ *
+ * Steps: 1. Issue Standard SMC and preempt it via SGI
+ * 2. Resume the preempted SMC and repeat from Step 1 for STRESS_COUNT times.
+ * Returns SUCCESS if above 2 steps are performed correctly else failure.
+ */
+test_result_t tsp_int_and_resume_stress(void)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result = {0};
+	test_result_t res = TEST_RESULT_SUCCESS;
+	int count;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	NOTICE("This stress test will repeat %d times\n", STRESS_COUNT);
+
+	/* Preempt and resume the same STD SMC over and over again. */
+	for (count = 0; (count < STRESS_COUNT) &&
+			(res == TEST_RESULT_SUCCESS); count++) {
+		/* Standard SMC */
+		tsp_svc_params.arg0 = TSP_STD_FID(TSP_ADD);
+		tsp_svc_params.arg1 = 4;
+		tsp_svc_params.arg2 = 6;
+		/* Try to preempt TSP via IRQ */
+		res = preempt_tsp_via_SGI(&tsp_svc_params, 0);
+		if (res == TEST_RESULT_FAIL)
+			return res;
+
+		/* Issue RESUME */
+		tsp_svc_params.arg0 = TSP_FID_RESUME;
+		tsp_result = tftf_smc(&tsp_svc_params);
+
+		/* Check the result of the addition */
+		if (tsp_result.ret0 != 0 || tsp_result.ret1 != 8 ||
+		    tsp_result.ret2 != 12) {
+			tftf_testcase_printf("SMC resume returned wrong result:"
+					"got %d %d %d expected: 0 8 12\n",
+					(unsigned int)tsp_result.ret0,
+					(unsigned int)tsp_result.ret1,
+					(unsigned int)tsp_result.ret2);
+			res = TEST_RESULT_FAIL;
+		}
+	}
+
+	return res;
+}
+
+/*
+ * @Test_Aim@ Test Secure FIQ when pre-empted by non secure interrupt.
+ *
+ * We really cannot verify whether FIQ fired and preempted the SGI handler
+ * or not. The TSP prints the address at which the execution was interrupted
+ * for the FIQ. By looking at the address printed from the TSP logs, we can
+ * verify that the SGI handler was interrupted by FIQ. For now, We are assuming
+ * CPU is woken by Secure Timer Interrupt.
+ *
+ * Steps: 1. Issue Standard SMC and preempt it via SGI
+ * 2. Wait in the SGI handler for FIQ which is firing every 500 ms.
+ * 3. Resume the preempted SMC
+ * Returns SUCCESS if above 3 steps are performed correctly else failure.
+ */
+test_result_t tsp_fiq_while_int(void)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result = {0};
+	test_result_t res;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/*
+	 * Standard SMC, preempted via SGI. The second argument makes the SGI
+	 * handler wait in WFI so that the secure FIQ can interrupt it.
+	 */
+	tsp_svc_params.arg0 = TSP_STD_FID(TSP_ADD);
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+	res = preempt_tsp_via_SGI(&tsp_svc_params, 1);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Now that we have ensured preemption, issue RESUME */
+	tsp_svc_params.arg0 = TSP_FID_RESUME;
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* Check the result of the addition */
+	if (tsp_result.ret0 != 0 || tsp_result.ret1 != 8 ||
+	    tsp_result.ret2 != 12) {
+		tftf_testcase_printf("SMC resume returned wrong result:"
+				"got %d %d %d expected: 0 8 12\n",
+				(unsigned int)tsp_result.ret0,
+				(unsigned int)tsp_result.ret1,
+				(unsigned int)tsp_result.ret2);
+		return TEST_RESULT_FAIL;
+	}
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_smc_tsp_std_fn_call.c b/tftf/tests/runtime_services/trusted_os/tsp/test_smc_tsp_std_fn_call.c
new file mode 100644
index 0000000..8363676
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_smc_tsp_std_fn_call.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <smccc.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/**
+ * @Test_Aim@ test_smc_tsp_std_fns_call - Query standard function information
+ * against TrustedOS service calls.
+ *
+ * This test targets the TSP, i.e. the Trusted Firmware-A Test Secure-EL1
+ * Payload. If there is no Trusted OS in the software stack, or if it is not
+ * the TSP, this test will be skipped.
+ *
+ * The following queries are performed:
+ * 1) Call count
+ * 2) Call revision info
+ */
+test_result_t test_smc_tsp_std_fns_call(void)
+{
+	smc_args std_svc_args;
+	smc_ret_values ret;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/*
+	 * TrustedOS Service Call Count. Note: despite the header comment,
+	 * only the call count and revision queries are issued here — there
+	 * is no UID query in this function.
+	 */
+	std_svc_args.arg0 = SMC_TOS_CALL_COUNT;
+	ret = tftf_smc(&std_svc_args);
+	if (ret.ret0 != TSP_NUM_FID) {
+		tftf_testcase_printf("Wrong Call Count: expected %u,\n"
+				     " got %llu\n", TSP_NUM_FID,
+				     (unsigned long long)ret.ret0);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* TrustedOS Service Call Revision details */
+	std_svc_args.arg0 = SMC_TOS_REVISION;
+	ret = tftf_smc(&std_svc_args);
+	if ((ret.ret0 != TSP_REVISION_MAJOR) ||
+	     ret.ret1 != TSP_REVISION_MINOR) {
+		tftf_testcase_printf("Wrong Revision: expected {%u.%u}\n"
+				     " got {%llu.%llu}\n",
+				     TSP_REVISION_MAJOR, TSP_REVISION_MINOR,
+				     (unsigned long long)ret.ret0,
+				     (unsigned long long)ret.ret1);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_tsp_fast_smc.c b/tftf/tests/runtime_services/trusted_os/tsp/test_tsp_fast_smc.c
new file mode 100644
index 0000000..b987259
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_tsp_fast_smc.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#define TEST_ITERATIONS_COUNT 1000
+
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+
+/*
+ * This function calls and validates the results of TSP operation.
+ * It expects:
+ * - fn_identifier: SMC function identifier
+ * - arg1, arg2: data on which TSP performs operation
+ * - ret1, ret2: results expected after performing operation
+ * on arg1 and arg2
+ * Returns Success if return values of SMC operation are same as
+ * expected else failure.
+ */
+static test_result_t validate_tsp_operations(uint64_t fn_identifier,
+					     uint64_t arg1,
+					     uint64_t arg2,
+					     uint64_t ret1,
+					     uint64_t ret2)
+{
+	/* Package the function identifier and operands into one SMC. */
+	smc_args args = {fn_identifier, arg1, arg2};
+	smc_ret_values result = tftf_smc(&args);
+
+	/* A non-zero primary return value means the TSP rejected the call. */
+	if (result.ret0 != 0) {
+		tftf_testcase_printf("TSP operation 0x%x failed, error:0x%x\n",
+				(unsigned int) fn_identifier,
+				(unsigned int) result.ret0);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Both result registers must match the expected values. */
+	if (result.ret1 != ret1 || result.ret2 != ret2) {
+		tftf_testcase_printf("TSP function:0x%x returned wrong result:"
+				     "got 0x%x 0x%x expected: 0x%x 0x%x\n",
+				     (unsigned int)fn_identifier,
+				     (unsigned int)result.ret1,
+				     (unsigned int)result.ret2,
+				     (unsigned int)ret1,
+				     (unsigned int)ret2);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * This function issues SMC calls to trusted OS(TSP) to perform basic mathematical
+ * operations supported by it and validates the result.
+ */
+static test_result_t issue_trustedos_service_calls(void)
+{
+	u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(mpid);
+	test_result_t ret;
+	int i;
+
+	/* Signal to the lead CPU that the calling CPU has entered the test */
+	tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+	for (i = 0; i < TEST_ITERATIONS_COUNT; i++) {
+		/*
+		 * TSP add function performs addition of argx to itself and
+		 * returns the result in argx where x is 1, 2
+		 */
+		ret = validate_tsp_operations(TSP_FAST_FID(TSP_ADD), 4, 6, 8, 12);
+		if (ret != TEST_RESULT_SUCCESS)
+			return ret;
+
+		/*
+		 * TSP sub function performs substraction of argx to itself and
+		 * returns the result in argx where x is 1, 2
+		 */
+		ret = validate_tsp_operations(TSP_FAST_FID(TSP_SUB), 4, 6, 0, 0);
+		if (ret != TEST_RESULT_SUCCESS)
+			return ret;
+
+		/*
+		 * TSP mul function performs multiplication of argx to itself and
+		 * returns the result in argx where x is 1, 2
+		 */
+		ret = validate_tsp_operations(TSP_FAST_FID(TSP_MUL), 4, 6, 16, 36);
+		if (ret != TEST_RESULT_SUCCESS)
+			return ret;
+
+		/*
+		 * TSP div function performs division of argx to itself and
+		 * returns the result in argx where x is 1, 2
+		 */
+		ret = validate_tsp_operations(TSP_FAST_FID(TSP_DIV), 4, 6, 1, 1);
+		if (ret != TEST_RESULT_SUCCESS)
+			return ret;
+	}
+
+	/*
+	 * All iterations passed. Return success explicitly rather than the
+	 * last loop value, so the return is well-defined even if
+	 * TEST_ITERATIONS_COUNT is ever configured to 0 (the original
+	 * returned 'ret', which would then be uninitialised).
+	 */
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Stress test the tsp functionality by issuing fast smc calls
+ * to perform trusted OS operations on multiple CPUs
+ * Returns Success/Failure/Skipped (if Trusted OS is absent or is not TSP)
+ */
+test_result_t test_tsp_fast_smc_operations(void)
+{
+	u_register_t lead_cpu;
+	u_register_t cpu_mpid;
+	int cpu_node;
+	unsigned int core_pos;
+	int ret;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	lead_cpu = read_mpidr_el1() & MPID_MASK;
+
+	/* Power on every secondary CPU; each one stress-calls the TSP. */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already on */
+		if (cpu_mpid == lead_cpu)
+			continue;
+
+		ret = tftf_cpu_on(cpu_mpid,
+				  (uintptr_t) issue_trustedos_service_calls,
+				  0);
+		if (ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("Failed to power on CPU 0x%llx (%i)\n",
+					     (unsigned long long) cpu_mpid, ret);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Wait for non-lead CPUs to enter the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_cpu)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+	}
+
+	/*
+	 * Issue the same service calls from the lead CPU, concurrently with
+	 * the secondaries that are still running theirs.
+	 */
+	return issue_trustedos_service_calls();
+}
diff --git a/tftf/tests/template_tests/test_template_multi_core.c b/tftf/tests/template_tests/test_template_multi_core.c
new file mode 100644
index 0000000..0986532
--- /dev/null
+++ b/tftf/tests/template_tests/test_template_multi_core.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+
+/*
+ * Test entry point for the non-lead CPUs, passed to tftf_cpu_on() by the
+ * lead CPU in test_template_multi_core(). Signals the calling CPU's
+ * 'entered the test' event to the lead CPU, then reports success.
+ */
+static test_result_t non_lead_cpu_fn(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Signal to the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Template code for a test running on multiple CPUs.
+ *
+ * This "test" powers on all CPUs on the platform and reports test success.
+ * The function test_template_multi_core() runs on the lead CPU only.
+ * The test entry point for the other CPUs is non_lead_cpu_fn(), as specified
+ * when bringing them up with tftf_cpu_on().
+ *
+ * This "test" is skipped on single-core platforms. If an error occurs during
+ * the bring-up of non-lead CPUs, it is skipped as well. Otherwise, this test
+ * always returns success.
+ */
+test_result_t test_template_multi_core(void)
+{
+ unsigned int lead_mpid;
+ unsigned int cpu_mpid, cpu_node;
+ unsigned int core_pos;
+ int psci_ret;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ /* Power on all CPUs; a failure to bring one up skips the test */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t) non_lead_cpu_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ cpu_mpid, psci_ret);
+ return TEST_RESULT_SKIPPED;
+ }
+ }
+
+ /* Wait for non-lead CPUs to enter the test */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/template_tests/test_template_single_core.c b/tftf/tests/template_tests/test_template_single_core.c
new file mode 100644
index 0000000..3155baf
--- /dev/null
+++ b/tftf/tests/template_tests/test_template_single_core.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Template code for a test running on a single CPU.
+ *
+ * This "test" does nothing but report test success. It runs on the lead CPU.
+ */
+test_result_t test_template_single_core(void)
+{
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/tests-arm-state-switch.xml b/tftf/tests/tests-arm-state-switch.xml
new file mode 100644
index 0000000..244f1bc
--- /dev/null
+++ b/tftf/tests/tests-arm-state-switch.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <!--
+ Test suite exercising execution state switch SiP service.
+
+ 'test_exec_state_switch_reset_before' must execute first in the suite, and
+ 'test_exec_state_switch_after_cpu_on' the last. See comments in
+ test_exec_state_switch.c for details.
+ -->
+ <testsuite name="State switch" description="Test ARM SiP State Switch service">
+ <testcase name="System reset before state switch" function="test_exec_state_switch_reset_before" />
+ <testcase name="Request state switch with invalid PC" function="test_exec_state_switch_invalid_pc" />
+ <testcase name="Request state switch with invalid context" function="test_exec_state_switch_invalid_ctx" />
+ <testcase name="Request a valid state switch" function="test_exec_state_switch_valid" />
+ <testcase name="Request a valid state switch after CPU_ON" function="test_exec_state_switch_after_cpu_on" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-boot-req.xml b/tftf/tests/tests-boot-req.xml
new file mode 100644
index 0000000..27d4844
--- /dev/null
+++ b/tftf/tests/tests-boot-req.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="Boot requirement tests" description="Tests for boot requirement according to ARM ARM and PSCI">
+ <testcase name="CNTFRQ compare test" function="test_cntfrq_check" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-common.xml b/tftf/tests/tests-common.xml
new file mode 100644
index 0000000..e2a1a2b
--- /dev/null
+++ b/tftf/tests/tests-common.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+
+<!-- External references to all individual tests files. -->
+<!DOCTYPE testsuites [
+ <!ENTITY tests-tftf-validation SYSTEM "tests-tftf-validation.xml">
+ <!ENTITY tests-boot-req SYSTEM "tests-boot-req.xml">
+ <!ENTITY tests-psci SYSTEM "tests-psci.xml">
+ <!ENTITY tests-sdei SYSTEM "tests-sdei.xml">
+ <!ENTITY tests-rt-instr SYSTEM "tests-runtime-instrumentation.xml">
+ <!ENTITY tests-tsp SYSTEM "tests-tsp.xml">
+ <!ENTITY tests-el3-pstate SYSTEM "tests-el3-power-state.xml">
+ <!ENTITY tests-state-switch SYSTEM "tests-arm-state-switch.xml">
+ <!ENTITY tests-cpu-extensions SYSTEM "tests-cpu-extensions.xml">
+ <!ENTITY tests-performance SYSTEM "tests-performance.xml">
+]>
+
+<testsuites>
+
+ &tests-tftf-validation;
+ &tests-boot-req;
+ &tests-psci;
+ &tests-sdei;
+ &tests-rt-instr;
+ &tests-tsp;
+ &tests-el3-pstate;
+ &tests-state-switch;
+ &tests-cpu-extensions;
+ &tests-performance;
+
+</testsuites>
diff --git a/tftf/tests/tests-cpu-extensions.xml b/tftf/tests/tests-cpu-extensions.xml
new file mode 100644
index 0000000..17f37fd
--- /dev/null
+++ b/tftf/tests/tests-cpu-extensions.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="CPU extensions" description="Various CPU extensions tests">
+ <testcase name="AMUv1 non-zero counters" function="test_amu_nonzero_ctr" />
+ <testcase name="AMUv1 suspend/resume" function="test_amu_suspend_resume" />
+ </testsuite>
+
+ <testsuite name="ARM_ARCH_SVC" description="Arm Architecture Service tests">
+ <testcase name="SMCCC_ARCH_WORKAROUND_1 test" function="test_smccc_arch_workaround_1" />
+ <testcase name="SMCCC_ARCH_WORKAROUND_2 test" function="test_smccc_arch_workaround_2" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-el3-power-state.xml b/tftf/tests/tests-el3-power-state.xml
new file mode 100644
index 0000000..852401d
--- /dev/null
+++ b/tftf/tests/tests-el3-power-state.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="EL3 power state parser validation" description="Validation of EL3 power state parsing algorithm">
+ <testcase name="Create all power states and validate EL3 power state parsing" function="test_psci_validate_pstate" />
+ <testcase name="Create only local power state and validate EL3 power state parsing" function="test_psci_valid_local_pstate" />
+ <testcase name="Create invalid local power state at all levels and validate EL3 power state parsing" function="test_psci_invalid_stateID" />
+ <testcase name="Create invalid power state type and validate EL3 power state parsing" function="test_psci_invalid_state_type" />
+ <testcase name="Create invalid power level and validate EL3 power state parsing for original state format" function="test_psci_invalid_power_level" />
+ <testcase name="Create a power state with valid and invalid local state ID at different levels and validate power state parsing" function="test_psci_mixed_state_id" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-extensive.xml b/tftf/tests/tests-extensive.xml
new file mode 100644
index 0000000..7ba17d3
--- /dev/null
+++ b/tftf/tests/tests-extensive.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+
+<!-- External references to all individual tests files. -->
+<!DOCTYPE testsuites [
+ <!ENTITY tests-psci-extensive SYSTEM "tests-psci-extensive.xml">
+
+ <!ENTITY tests-tftf-validation SYSTEM "tests-tftf-validation.xml">
+ <!ENTITY tests-psci SYSTEM "tests-psci.xml">
+ <!ENTITY tests-sdei SYSTEM "tests-sdei.xml">
+ <!ENTITY tests-rt-instr SYSTEM "tests-runtime-instrumentation.xml">
+ <!ENTITY tests-tsp SYSTEM "tests-tsp.xml">
+ <!ENTITY tests-el3-pstate SYSTEM "tests-el3-power-state.xml">
+ <!ENTITY tests-state-switch SYSTEM "tests-arm-state-switch.xml">
+ <!ENTITY tests-cpu-extensions SYSTEM "tests-cpu-extensions.xml">
+ <!ENTITY tests-performance SYSTEM "tests-performance.xml">
+]>
+
+<testsuites>
+
+ &tests-psci-extensive;
+
+ &tests-tftf-validation;
+ &tests-psci;
+ &tests-sdei;
+ &tests-rt-instr;
+ &tests-tsp;
+ &tests-el3-pstate;
+ &tests-state-switch;
+ &tests-cpu-extensions;
+ &tests-performance;
+
+</testsuites>
diff --git a/tftf/tests/tests-fwu.xml b/tftf/tests/tests-fwu.xml
new file mode 100644
index 0000000..0d86309
--- /dev/null
+++ b/tftf/tests/tests-fwu.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="fwu toc test" description="Validate FWU TOC invalid scenario">
+ <testcase name="FWU TOC Invalid" function="test_fwu_toc" />
+ </testsuite>
+
+ <testsuite name="fwu auth test" description="Validate FWU AUTH Failure scenario">
+ <testcase name="FWU AUTH Failure" function="test_fwu_auth" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-manual.xml b/tftf/tests/tests-manual.xml
new file mode 100644
index 0000000..15a9609
--- /dev/null
+++ b/tftf/tests/tests-manual.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+
+ <testsuite name="PSCI STAT" description="Test PSCI STAT support System level">
+ <testcase name="for stats after system reset" function="test_psci_stats_after_reset" />
+ <testcase name="for stats after system shutdown" function="test_psci_stats_after_shutdown" />
+ </testsuite>
+
+ <testsuite name="System off test" description="Validate SYSTEM_OFF PSCI call">
+ <testcase name="System Off" function="test_system_off" />
+ </testsuite>
+
+ <testsuite name="PSCI mem_protect" description="Check the mem_protect feature">
+ <testcase name="PSCI mem_protect" function="test_mem_protect" />
+ </testsuite>
+
+ <testsuite name="PSCI reset2" description="Check the reset2 feature">
+ <testcase name="PSCI reset2 - warm reset" function="reset2_warm" />
+ <testcase name="PSCI reset2 - invalid reset options" function="reset2_test_invalid" />
+ <testcase name="PSCI reset2 - warm reset and mem_protect" function="reset2_mem_protect" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-performance.xml b/tftf/tests/tests-performance.xml
new file mode 100644
index 0000000..5ad5347
--- /dev/null
+++ b/tftf/tests/tests-performance.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="Performance tests" description="Measure some performance">
+ <testcase name="PSCI_VERSION latency" function="smc_psci_version_latency" />
+ <testcase name="Standard Service Call UID latency" function="smc_std_svc_call_uid_latency" />
+ <testcase name="SMCCC_ARCH_WORKAROUND_1 latency" function="smc_arch_workaround_1" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-psci-extensive.xml b/tftf/tests/tests-psci-extensive.xml
new file mode 100644
index 0000000..a2c05b2
--- /dev/null
+++ b/tftf/tests/tests-psci-extensive.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="PSCI CPU ON OFF Stress Tests" description="Stress-test hotplug">
+ <testcase name="Repeated shutdown of all cores to stress test CPU_ON, CPU_SUSPEND and CPU_OFF"
+ function="psci_on_off_suspend_coherency_test" />
+ <!-- testcase name="Verify PSCI CPU ON race" function="psci_verify_cpu_on_race" / -->
+ <testcase name="PSCI CPU ON OFF stress test" function="psci_cpu_on_off_stress" />
+ <testcase name="PSCI CPU ON OFF SUSPEND stress test" function="psci_cpu_on_off_suspend_stress" />
+ <testcase name="Repeated hotplug of all cores to stress test CPU_ON and CPU_OFF"
+ function="psci_hotplug_stress_test" />
+ <testcase name="Random hotplug cores in a large iteration to stress boot path code"
+ function="psci_hotplug_single_core_stress_test" />
+ <testcase name="Hotplug a cluster in a large iteration to stress cluster on and off functionality"
+ function="psci_cluster_hotplug_stress_test" />
+ </testsuite>
+
+ <testsuite name="PSCI SYSTEM SUSPEND stress tests" description="Stress-test SYSTEM SUSPEND">
+ <testcase name="Stress test PSCI_SYSTEM_SUSPEND" function="psci_sys_susp_on_off_stress_test" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-psci.xml b/tftf/tests/tests-psci.xml
new file mode 100644
index 0000000..e2be557
--- /dev/null
+++ b/tftf/tests/tests-psci.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <!--
+ Strictly speaking, this testsuite is not testing PSCI but we put it here
+ nonetheless to avoid having it alone in a separate XML file.
+ -->
+ <testsuite name="Query runtime services" description="Generic queries as defined by the SMCCC">
+ <testcase name="Unknown SMC" function="test_unknown_smc" />
+ <testcase name="Query Standard Service" function="test_query_std_svc" />
+ </testsuite>
+
+ <testsuite name="PSCI Version" description="Check the version of PSCI implemented">
+ <testcase name="PSCI Version" function="test_psci_version" />
+ </testsuite>
+
+ <testsuite name="PSCI Affinity Info" description="Test PSCI AFFINITY_INFO support">
+ <testcase name="Affinity info level0 on" function="test_affinity_info_level0_on" />
+ <testcase name="Affinity info level0 off" function="test_affinity_info_level0_off" />
+ <testcase name="Affinity info level1 on" function="test_affinity_info_level1_on" />
+ <testcase name="Affinity info level1 off" function="test_affinity_info_level1_off" />
+ <testcase name="Affinity info level2" function="test_affinity_info_level2" />
+ <testcase name="Affinity info level3" function="test_affinity_info_level3" />
+ <testcase name="Affinity info level0 powerdown" function="test_affinity_info_level0_powerdown" />
+ </testsuite>
+
+ <testsuite name="CPU Hotplug" description="Test PSCI CPU Hotplug support">
+ <testcase name="CPU hotplug" function="test_psci_cpu_hotplug" />
+ <testcase name="CPU already on" function="test_psci_cpu_hotplug_plugged" />
+ <testcase name="Context ID passing" function="test_context_ids" />
+ <testcase name="Invalid CPU" function="test_psci_cpu_hotplug_invalid_cpu" />
+ <testcase name="Invalid entry point" function="test_psci_cpu_hotplug_invalid_ep" />
+ </testsuite>
+
+ <testsuite name="PSCI CPU Suspend" description="Test PSCI CPU Suspend support">
+ <testcase name="CPU suspend to powerdown at level 0" function="test_psci_suspend_powerdown_level0" />
+ <testcase name="CPU suspend to powerdown at level 1" function="test_psci_suspend_powerdown_level1" />
+ <testcase name="CPU suspend to powerdown at level 2" function="test_psci_suspend_powerdown_level2" />
+ <testcase name="CPU suspend to powerdown at level 3" function="test_psci_suspend_powerdown_level3" />
+
+ <testcase name="CPU suspend to standby at level 0" function="test_psci_suspend_standby_level0" />
+ <testcase name="CPU suspend to standby at level 1" function="test_psci_suspend_standby_level1" />
+ <testcase name="CPU suspend to standby at level 2" function="test_psci_suspend_standby_level2" />
+ <testcase name="CPU suspend to standby at level 3" function="test_psci_suspend_standby_level3" />
+ </testsuite>
+
+ <testsuite name="PSCI STAT" description="Test PSCI STAT support Core level">
+ <testcase name="for valid composite state CPU suspend" function="test_psci_stat_all_power_states" />
+ <testcase name="Stats test cases for CPU OFF" function="test_psci_stats_cpu_off" />
+ <testcase name="Stats test cases after system suspend" function="test_psci_stats_system_suspend" />
+ </testsuite>
+
+ <testsuite name="PSCI NODE_HW_STATE" description="Test PSCI NODE_HW_STATE API">
+ <testcase name="Tests for NODE_HW_STATE" function="test_psci_node_hw_state" />
+ <testcase name="Tests for NODE_HW_STATE on multicluster" function="test_psci_node_hw_state_multi" />
+ </testsuite>
+
+ <testsuite name="PSCI Features" description="Check the PSCI features implemented">
+ <testcase name="PSCI Features" function="test_psci_features" />
+ <testcase name="PSCI Invalid Features" function="test_psci_features_invalid_id" />
+ </testsuite>
+
+ <testsuite name="PSCI MIGRATE_INFO_TYPE" description="Test MIGRATE_INFO_TYPE support">
+ <testcase name="PSCI MIGRATE_INFO_TYPE" function="test_migrate_info_type" />
+ </testsuite>
+
+ <testsuite name="PSCI mem_protect_check" description="Check the mem_protect_check_range feature">
+ <testcase name="PSCI mem_protect_check" function="test_mem_protect_check" />
+ </testsuite>
+
+ <testsuite name="PSCI System Suspend Validation" description="Validate PSCI System Suspend API">
+ <testcase name="System suspend multiple times" function="test_psci_sys_susp_multiple_iteration" />
+ <testcase name="system suspend from all cores" function="test_system_suspend_from_all_cores" />
+ <testcase name="System suspend with cores on" function="test_psci_sys_susp_with_cores_on" />
+ <testcase name="Suspend system with cores in suspend" function="test_psci_sys_susp_with_cores_in_suspend" />
+ <testcase name="Validate suspend to RAM functionality" function="test_psci_sys_susp_validate_ram" />
+ <testcase name="System suspend with invalid entrypoint address" function="test_system_suspend_invalid_entrypoint" />
+ <testcase name="System suspend with pending IRQ" function="test_psci_sys_susp_pending_irq" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-runtime-instrumentation.xml b/tftf/tests/tests-runtime-instrumentation.xml
new file mode 100644
index 0000000..4d45900
--- /dev/null
+++ b/tftf/tests/tests-runtime-instrumentation.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="Runtime Instrumentation Validation" description="Validate PMF Runtime Instrumentation">
+ <testcase name="Suspend to deepest power level on all cores in parallel" function="test_rt_instr_susp_deep_parallel" />
+ <testcase name="Suspend to deepest power level on all cores in sequence" function="test_rt_instr_susp_deep_serial" />
+ <testcase name="CPU suspend on all cores in parallel" function="test_rt_instr_cpu_susp_parallel" />
+ <testcase name="CPU suspend on all cores in sequence" function="test_rt_instr_cpu_susp_serial" />
+ <testcase name="CPU off on all non-lead cores in sequence and suspend lead to deepest power level" function="test_rt_instr_cpu_off_serial" />
+ <testcase name="PSCI version call on all cores in parallel" function="test_rt_instr_psci_version_parallel" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-sdei.xml b/tftf/tests/tests-sdei.xml
new file mode 100644
index 0000000..db6b0c9
--- /dev/null
+++ b/tftf/tests/tests-sdei.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="SDEI" description="SDEI test framework">
+ <testcase name="SDEI event handler state machine testing" function="test_sdei_state" />
+ <testcase name="SDEI event handling on all cores in sequence" function="test_sdei_event_serial" />
+ <testcase name="SDEI event handling on all cores in parallel" function="test_sdei_event_parallel" />
+ <testcase name="SDEI event signaling: each core signals itself" function="test_sdei_event_signal_serial" />
+ <testcase name="SDEI event signaling: one core signals all others" function="test_sdei_event_signal_all" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-single-fault.xml b/tftf/tests/tests-single-fault.xml
new file mode 100644
index 0000000..570be46
--- /dev/null
+++ b/tftf/tests/tests-single-fault.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Single fault" description="Single fault injection">
+ <testcase name="Inject SError and wait" function="test_single_fault" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-spm.xml b/tftf/tests/tests-spm.xml
new file mode 100644
index 0000000..ff3ff44
--- /dev/null
+++ b/tftf/tests/tests-spm.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="SPM tests"
+ description="Test SPM APIs">
+ <testcase name="SPM NS interrupts test"
+ function="test_secure_partition_interrupt_by_ns" />
+ <testcase name="SPM secondary CPUs sequential test"
+ function="test_secure_partition_secondary_cores_seq" />
+ <testcase name="SPM secondary CPUs simultaneous test"
+ function="test_secure_partition_secondary_cores_sim" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-template.xml b/tftf/tests/tests-template.xml
new file mode 100644
index 0000000..d55595e
--- /dev/null
+++ b/tftf/tests/tests-template.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <!--
+ The "template" testsuite aims at providing template test code as a
+ starting point for developing new tests. These tests don't do anything
+ useful in terms of testing.
+ -->
+ <testsuite name="Template" description="Template test code">
+ <testcase name="Single core test" function="test_template_single_core" />
+ <testcase name="Multi core test" function="test_template_multi_core" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-tftf-validation.xml b/tftf/tests/tests-tftf-validation.xml
new file mode 100644
index 0000000..932b10e
--- /dev/null
+++ b/tftf/tests/tests-tftf-validation.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="Framework Validation" description="Validate the core features of the test framework">
+ <testcase name="NVM support" function="test_validation_nvm" />
+ <testcase name="NVM serialisation" function="test_validate_nvm_serialisation" />
+ <testcase name="Events API" function="test_validation_events" />
+ <testcase name="IRQ handling" function="test_validation_irq" />
+ <testcase name="SGI support" function="test_validation_sgi" />
+ </testsuite>
+
+ <testsuite name="Timer framework Validation" description="Validate the timer driver and timer framework">
+ <testcase name="Verify the timer interrupt generation" function="test_timer_framework_interrupt" />
+ <testcase name="Target timer to a power down cpu" function="test_timer_target_power_down_cpu" />
+ <testcase name="Test scenario where multiple CPUs call same timeout" function="test_timer_target_multiple_same_interval" />
+ <testcase name="Stress test the timer framework" function="stress_test_timer_framework" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-tsp.xml b/tftf/tests/tests-tsp.xml
new file mode 100644
index 0000000..e98df5f
--- /dev/null
+++ b/tftf/tests/tests-tsp.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="IRQ support in TSP" description="Test the normal IRQ preemption support in TSP.">
+ <testcase name="TSP preempt by IRQ and resume" function="tsp_int_and_resume" />
+ <testcase name="Fast SMC while TSP preempted" function="test_fast_smc_when_tsp_preempted" />
+ <testcase name="STD SMC resumption while TSP preempted" function="test_std_smc_when_tsp_preempted_resume" />
+ <testcase name="STD SMC abortion while TSP preempted" function="test_std_smc_when_tsp_preempted_abort" />
+ <testcase name="Resume SMC without TSP preemption" function="test_resume_smc_without_preemption" />
+ <testcase name="Stress TSP preemption and resumption" function="tsp_int_and_resume_stress" />
+ <testcase name="Test Secure FIQ while TSP is preempted" function="tsp_fiq_while_int" />
+ <testcase name="Resume preempted STD SMC" function="test_irq_preempted_std_smc" />
+ <testcase name="Resume preempted STD SMC from other CPUs" function="test_resume_preempted_std_smc_other_cpus" />
+ <testcase name="Resume STD SMC from different CPUs" function="test_resume_different_cpu_preempted_std_smc" />
+ <testcase name="Resume preempted STD SMC after PSCI CPU OFF/ON cycle" function="test_psci_cpu_on_off_preempted_std_smc" />
+ <testcase name="Resume preempted STD SMC after PSCI SYSTEM SUSPEND" function="test_psci_system_suspend_preempted_std_smc" />
+ </testsuite>
+
+ <testsuite name="TSP handler standard functions result test" description="Validate TSP SMC standard function call">
+ <testcase name="TestSecurePayload standard functions service call" function="test_smc_tsp_std_fns_call" />
+ </testsuite>
+
+ <testsuite name="Stress test TSP functionality" description="Validate TSP functionality">
+ <testcase name="Stress test TSP functionality" function="test_tsp_fast_smc_operations" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-uncontainable.xml b/tftf/tests/tests-uncontainable.xml
new file mode 100644
index 0000000..ecff3ac
--- /dev/null
+++ b/tftf/tests/tests-uncontainable.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Uncontainable error" description="Uncontainable error">
+ <testcase name="Inject Uncontainable error" function="test_uncontainable" />
+ </testsuite>
+</testsuites>
+
diff --git a/tftf/tests/tests.mk b/tftf/tests/tests.mk
new file mode 100644
index 0000000..8ee8cc2
--- /dev/null
+++ b/tftf/tests/tests.mk
@@ -0,0 +1,88 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Path to the XML file listing the tests to run. If there is a platform-specific
+# test file, use it. If not, use the common one. If the user specified another
+# one, use that one instead.
+ifneq ($(wildcard ${PLAT_PATH}/tests.xml),)
+ TESTS_FILE := ${PLAT_PATH}/tests.xml
+else
+ TESTS_FILE := tftf/tests/tests-common.xml
+endif
+
+# Check that the selected tests file exists.
+ifeq (,$(wildcard ${TESTS_FILE}))
+ $(error "The file TESTS_FILE points to cannot be found")
+endif
+
+SPM_TESTS_SOURCES := \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ secure_service_helpers.c \
+ test_secure_service_handle.c \
+ test_secure_service_interrupts.c \
+ )
+
+FWU_TESTS_SOURCES := \
+ tftf/tests/fwu_tests/test_fwu_toc.c \
+ tftf/tests/fwu_tests/test_fwu_auth.c \
+ plat/common/fwu_nvm_accessors.c
+
+TESTS_SOURCES := $(addprefix tftf/tests/, \
+ extensions/amu/test_amu.c \
+ framework_validation_tests/test_timer_framework.c \
+ framework_validation_tests/test_validation_events.c \
+ framework_validation_tests/test_validation_irq.c \
+ framework_validation_tests/test_validation_nvm.c \
+ framework_validation_tests/test_validation_sgi.c \
+ misc_tests/inject_serror.S \
+ misc_tests/test_single_fault.c \
+ misc_tests/test_uncontainable.c \
+ performance_tests/smc_latencies.c \
+ misc_tests/boot_req_tests/test_cntfrq.c \
+ runtime_services/arm_arch_svc/smccc_arch_workaround_1.c \
+ runtime_services/arm_arch_svc/smccc_arch_workaround_2.c \
+ runtime_services/sip_service/test_exec_state_switch.c \
+ runtime_services/sip_service/test_exec_state_switch_asm.S \
+ runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c \
+ runtime_services/standard_service/psci/api_tests/affinity_info/test_psci_affinity_info.c \
+ runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug.c \
+ runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug_invalid.c \
+ runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c \
+ runtime_services/standard_service/psci/api_tests/migrate_info_type/test_migrate_info_type.c \
+ runtime_services/standard_service/psci/api_tests/psci_features/test_psci_features.c \
+ runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c \
+ runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c \
+ runtime_services/standard_service/psci/api_tests/psci_version/test_psci_version.c \
+ runtime_services/standard_service/psci/api_tests/system_off/test_system_off.c \
+ runtime_services/standard_service/psci/api_tests/system_suspend/test_psci_system_suspend.c \
+ runtime_services/standard_service/psci/api_tests/validate_power_state/test_validate_power_state.c \
+ runtime_services/standard_service/psci/system_tests/test_psci_hotplug_stress.c \
+ runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c \
+ runtime_services/standard_service/psci/system_tests/test_psci_system_suspend_stress.c \
+ runtime_services/standard_service/psci/api_tests/mem_protect/test_mem_protect.c \
+ runtime_services/standard_service/psci/api_tests/mem_protect_check/mem_protect_check.c \
+ runtime_services/standard_service/psci/api_tests/reset2/reset2.c \
+ runtime_services/standard_service/query_std_svc.c \
+ runtime_services/standard_service/unknown_smc.c \
+ runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S \
+ runtime_services/standard_service/sdei/system_tests/test_sdei.c \
+ runtime_services/standard_service/sdei/system_tests/test_sdei_state.c \
+ runtime_services/trusted_os/tsp/test_irq_preempted_std_smc.c \
+ runtime_services/trusted_os/tsp/test_normal_int_switch.c \
+ runtime_services/trusted_os/tsp/test_smc_tsp_std_fn_call.c \
+ runtime_services/trusted_os/tsp/test_tsp_fast_smc.c \
+)
+
+TESTS_SOURCES += ${SPM_TESTS_SOURCES} \
+ ${FWU_TESTS_SOURCES}
+
+# The following source files are part of the "template" testsuite, which aims
+# at providing template test code as a starting point for developing new tests.
+# They don't do anything useful in terms of testing so they are disabled by
+# default. Uncomment those lines along with the corresponding test suite XML
+# node in the tests file in use (e.g. tftf/tests/tests-common.xml) to enable them.
+# TESTS_SOURCES += tftf/tests/template_tests/test_template_single_core.c \
+# tftf/tests/template_tests/test_template_multi_core.c