Trusted Firmware-A Tests, version 2.0
This is the first public version of the tests for the Trusted
Firmware-A project. Please see the documentation provided in the
source tree for more details.
Change-Id: I6f3452046a1351ac94a71b3525c30a4ca8db7867
Signed-off-by: Sandrine Bailleux <sandrine.bailleux@arm.com>
Co-authored-by: amobal01 <amol.balasokamble@arm.com>
Co-authored-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
Co-authored-by: Asha R <asha.r@arm.com>
Co-authored-by: Chandni Cherukuri <chandni.cherukuri@arm.com>
Co-authored-by: David Cunado <david.cunado@arm.com>
Co-authored-by: Dimitris Papastamos <dimitris.papastamos@arm.com>
Co-authored-by: Douglas Raillard <douglas.raillard@arm.com>
Co-authored-by: dp-arm <dimitris.papastamos@arm.com>
Co-authored-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
Co-authored-by: Jonathan Wright <jonathan.wright@arm.com>
Co-authored-by: Kévin Petit <kevin.petit@arm.com>
Co-authored-by: Roberto Vargas <roberto.vargas@arm.com>
Co-authored-by: Sathees Balya <sathees.balya@arm.com>
Co-authored-by: Shawon Roy <Shawon.Roy@arm.com>
Co-authored-by: Soby Mathew <soby.mathew@arm.com>
Co-authored-by: Thomas Abraham <thomas.abraham@arm.com>
Co-authored-by: Vikram Kanigiri <vikram.kanigiri@arm.com>
Co-authored-by: Yatharth Kochar <yatharth.kochar@arm.com>
diff --git a/lib/power_management/hotplug/hotplug.c b/lib/power_management/hotplug/hotplug.c
new file mode 100644
index 0000000..7c3a988
--- /dev/null
+++ b/lib/power_management/hotplug/hotplug.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <cdefs.h> /* For __dead2 */
+#include <console.h>
+#include <debug.h>
+#include <irq.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <spinlock.h>
+#include <stdint.h>
+#include <tftf.h>
+
+/*
+ * Affinity info map of CPUs as seen by TFTF
+ * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON to mark CPU i
+ * as ON.
+ * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON_PENDING to mark
+ * CPU i as ON_PENDING.
+ * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_OFF to mark CPU i
+ * as OFF.
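+ *
+ * Expected state transitions:
+ *   OFF        -> ON_PENDING  (via tftf_cpu_on()/tftf_try_cpu_on())
+ *   ON_PENDING -> ON          (via tftf_set_cpu_online() on the target CPU)
+ *   ON         -> OFF         (via tftf_set_cpu_offline())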
+ */
+static tftf_cpu_state_t cpus_status_map[PLATFORM_CORE_COUNT];
+static int cpus_status_init_done;
+
+/*
+ * Reference count keeping track of the number of CPUs participating in
+ * a test.
+ */
+static volatile unsigned int ref_cnt;
+
+/* Lock to prevent concurrent accesses to the reference count */
+static spinlock_t ref_cnt_lock;
+
+/* Per-cpu test entrypoint */
+volatile test_function_t test_entrypoint[PLATFORM_CORE_COUNT];
+
+u_register_t tftf_primary_core = INVALID_MPID;
+
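+/*
+ * Increment the count of CPUs participating in the test and return the new
+ * count.
+ */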
+unsigned int tftf_inc_ref_cnt(void)
+{
+ unsigned int cnt;
+
+ spin_lock(&ref_cnt_lock);
+ assert(ref_cnt < PLATFORM_CORE_COUNT);
+ cnt = ++ref_cnt;
+ spin_unlock(&ref_cnt_lock);
+
+ VERBOSE("Entering the test (%u CPUs in the test now)\n", cnt);
+
+ return cnt;
+}
+
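+/*
+ * Decrement the count of CPUs participating in the test and return the new
+ * count.
+ */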
+unsigned int tftf_dec_ref_cnt(void)
+{
+ unsigned int cnt;
+
+ spin_lock(&ref_cnt_lock);
+ assert(ref_cnt != 0);
+ cnt = --ref_cnt;
+ spin_unlock(&ref_cnt_lock);
+
+ VERBOSE("Exiting the test (%u CPUs in the test now)\n", cnt);
+
+ return cnt;
+}
+
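+/* Return the current count of CPUs participating in the test. */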
+unsigned int tftf_get_ref_cnt(void)
+{
+ return ref_cnt;
+}
+
+void tftf_init_cpus_status_map(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+	/* Check that only the primary CPU does the initialisation */
+ assert((mpid & MPID_MASK) == tftf_primary_core);
+
+	/* Check that the initialisation is done only once */
+ assert(!cpus_status_init_done);
+
+ cpus_status_init_done = 1;
+
+	/*
+	 * cpus_status_map is already zero-initialised as part of the BSS
+	 * init, so just mark the primary CPU as ON.
+	 */
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON;
+}
+
+void tftf_set_cpu_online(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+	/*
+	 * Wait here until `tftf_try_cpu_on` has had a chance to update the
+	 * CPU state.
+	 */
+ while (cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_OFF)
+ ;
+
+ spin_lock(&cpus_status_map[core_pos].lock);
+ assert(cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON_PENDING);
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON;
+ spin_unlock(&cpus_status_map[core_pos].lock);
+}
+
+void tftf_set_cpu_offline(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ spin_lock(&cpus_status_map[core_pos].lock);
+
+ assert(tftf_is_cpu_online(mpid));
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_OFF;
+ spin_unlock(&cpus_status_map[core_pos].lock);
+}
+
+unsigned int tftf_is_cpu_online(unsigned int mpid)
+{
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ return cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON;
+}
+
+unsigned int tftf_is_core_pos_online(unsigned int core_pos)
+{
+ return cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON;
+}
+
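+/*
+ * Power on the CPU identified by `target_cpu` and register `entrypoint` as
+ * the test function it jumps to once the framework has initialised it.
+ * Concurrent state updates are serialised by the per-CPU lock. Returns
+ * PSCI_E_SUCCESS, PSCI_E_ALREADY_ON or another PSCI error code.
+ */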
+int32_t tftf_cpu_on(u_register_t target_cpu,
+ uintptr_t entrypoint,
+ u_register_t context_id)
+{
+ int32_t ret;
+ tftf_affinity_info_t cpu_state;
+ unsigned int core_pos = platform_get_core_pos(target_cpu);
+
+ spin_lock(&cpus_status_map[core_pos].lock);
+ cpu_state = cpus_status_map[core_pos].state;
+
+ if (cpu_state == TFTF_AFFINITY_STATE_ON) {
+ spin_unlock(&cpus_status_map[core_pos].lock);
+ return PSCI_E_ALREADY_ON;
+ }
+
+ if (cpu_state == TFTF_AFFINITY_STATE_ON_PENDING) {
+ spin_unlock(&cpus_status_map[core_pos].lock);
+ return PSCI_E_SUCCESS;
+ }
+
+ assert(cpu_state == TFTF_AFFINITY_STATE_OFF);
+
+ do {
+ ret = tftf_psci_cpu_on(target_cpu,
+ (uintptr_t) tftf_hotplug_entry,
+ context_id);
+
+		/* Catch multiple CPU_ON calls issued for the same CPU */
+ assert(ret != PSCI_E_ON_PENDING);
+ } while (ret == PSCI_E_ALREADY_ON);
+
+ if (ret == PSCI_E_SUCCESS) {
+ /*
+ * Populate the test entry point for this core.
+ * This is the address where the core will jump to once the framework
+ * has finished initialising it.
+ */
+ test_entrypoint[core_pos] = (test_function_t) entrypoint;
+
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON_PENDING;
+ spin_unlock(&cpus_status_map[core_pos].lock);
+ } else {
+ spin_unlock(&cpus_status_map[core_pos].lock);
+ ERROR("Failed to boot CPU 0x%llx (%d)\n",
+ (unsigned long long)target_cpu, ret);
+ }
+
+ return ret;
+}
+
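+/*
+ * Variant of tftf_cpu_on() which issues a single CPU_ON call without first
+ * consulting the framework's view of the CPU state. Callers are expected to
+ * cope with PSCI_E_ALREADY_ON and PSCI_E_ON_PENDING return values.
+ */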
+int32_t tftf_try_cpu_on(u_register_t target_cpu,
+ uintptr_t entrypoint,
+ u_register_t context_id)
+{
+ int32_t ret;
+ unsigned int core_pos = platform_get_core_pos(target_cpu);
+
+ ret = tftf_psci_cpu_on(target_cpu,
+ (uintptr_t) tftf_hotplug_entry,
+ context_id);
+
+ if (ret == PSCI_E_SUCCESS) {
+ spin_lock(&cpus_status_map[core_pos].lock);
+ assert(cpus_status_map[core_pos].state ==
+ TFTF_AFFINITY_STATE_OFF);
+ cpus_status_map[core_pos].state =
+ TFTF_AFFINITY_STATE_ON_PENDING;
+
+ spin_unlock(&cpus_status_map[core_pos].lock);
+
+ /*
+ * Populate the test entry point for this core.
+ * This is the address where the core will jump to once the
+ * framework has finished initialising it.
+ */
+ test_entrypoint[core_pos] = (test_function_t) entrypoint;
+ }
+
+ return ret;
+}
+
+/*
+ * Prepare the core to power off. Any driver which needs to perform specific
+ * tasks before powering off a CPU, e.g. migrating interrupts to another
+ * core, can implement such a function and have it called from here.
+ */
+static void tftf_prepare_cpu_off(void)
+{
+	/*
+	 * Do the bare minimum to turn off this CPU, i.e. disable interrupts
+	 * and disable the GIC CPU interface.
+	 */
+ disable_irq();
+ arm_gic_disable_interrupts_local();
+}
+
+/*
+ * Revert the changes made during tftf_prepare_cpu_off()
+ */
+static void tftf_revert_cpu_off(void)
+{
+ arm_gic_enable_interrupts_local();
+ enable_irq();
+}
+
+int32_t tftf_cpu_off(void)
+{
+ int32_t ret;
+
+ tftf_prepare_cpu_off();
+ tftf_set_cpu_offline();
+
+ INFO("Powering off\n");
+
+ /* Flush console before the last CPU is powered off. */
+ if (tftf_get_ref_cnt() == 0)
+ console_flush();
+
+ /* Power off the CPU */
+ ret = tftf_psci_cpu_off();
+
+ ERROR("Failed to power off (%d)\n", ret);
+
+	/*
+	 * A successful PSCI CPU_OFF call does not return.
+	 * If it does return, it should report the PSCI error code 'DENIED'.
+	 */
+ assert(ret == PSCI_E_DENIED);
+
+	/*
+	 * Since we returned from tftf_psci_cpu_off(), the CPU failed to
+	 * power down. Adjust the framework's view of the core by marking
+	 * it online again.
+	 */
+ tftf_set_cpu_online();
+ tftf_revert_cpu_off();
+
+ return ret;
+}
+
+/*
+ * C entry point for a CPU that has just been powered up.
+ */
+void __dead2 tftf_warm_boot_main(void)
+{
+ /* Initialise the CPU */
+ tftf_arch_setup();
+ arm_gic_setup_local();
+
+ /* Enable the SGI used by the timer management framework */
+ tftf_irq_enable(IRQ_WAKE_SGI, GIC_HIGHEST_NS_PRIORITY);
+
+ enable_irq();
+
+ INFO("Booting\n");
+
+ tftf_set_cpu_online();
+
+ /* Enter the test session */
+ run_tests();
+
+ /* Should never reach this point */
+ bug_unreachable();
+}
diff --git a/lib/power_management/suspend/aarch32/asm_tftf_suspend.S b/lib/power_management/suspend/aarch32/asm_tftf_suspend.S
new file mode 100644
index 0000000..cc59e7d
--- /dev/null
+++ b/lib/power_management/suspend/aarch32/asm_tftf_suspend.S
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <psci.h>
+#include "../suspend_private.h"
+
+ .global __tftf_suspend
+ .global __tftf_save_arch_context
+ .global __tftf_cpu_resume_ep
+
+ .section .text, "ax"
+
+/*
+ * Saves the CPU state before entering suspend: the callee-saved registers
+ * are pushed on the stack, and stack space is allocated for the
+ * CPU-specific registers to be restored when coming out of suspend.
+ *
+ * r0 contains a pointer to a tftf_suspend_context structure.
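+ *
+ * Stack layout after the prologue (SUSPEND_CTX_SZ is 64 bytes, as defined
+ * in suspend_private.h):
+ *   [sp, #0]  - [sp, #63]  : tftf_suspend_ctx for the resume path
+ *   [sp, #64] - [sp, #103] : saved r4-r12, lr (10 registers, 40 bytes)
+ * The address of the saved-register frame is stored in the context at
+ * SUSPEND_CTX_SP_OFFSET.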
+ */
+func __tftf_suspend
+ push {r4 - r12, lr}
+ mov r2, sp
+ sub sp, sp, #SUSPEND_CTX_SZ
+ mov r1, sp
+ /*
+	 * r1 now points to the tftf_suspend_context structure allocated on the stack
+ */
+ str r2, [r1, #SUSPEND_CTX_SP_OFFSET]
+ bl tftf_enter_suspend
+
+ /*
+ * If execution reaches this point, the suspend call was either
+ * a suspend to standby call or an invalid suspend call.
+	 * a suspend-to-standby call or an invalid suspend call.
+	 * In case of a suspend to powerdown, execution will instead resume in
+ */
+ add sp, sp, #SUSPEND_CTX_SZ
+ pop {r4 - r12, lr}
+ bx lr
+endfunc __tftf_suspend
+
+func __tftf_save_arch_context
+ ldcopr r1, HMAIR0
+ ldcopr r2, HCR
+ stm r0!, {r1, r2}
+ ldcopr16 r1, r2, HTTBR_64
+ stm r0!, {r1, r2}
+ ldcopr r1, HTCR
+ ldcopr r2, HVBAR
+ ldcopr r3, HSCTLR
+ stm r0, {r1, r2, r3}
+ bx lr
+endfunc __tftf_save_arch_context
+
+/*
+ * Restore the CPU register context.
+ * r0 must contain the context pointer.
+ */
+func __tftf_cpu_resume_ep
+	/* Invalidate local TLB entries before turning on the MMU */
+ stcopr r0, TLBIALLH
+ mov r4, r0
+ ldm r0!, {r1, r2}
+ stcopr r1, HMAIR0
+ stcopr r2, HCR
+ ldm r0!, {r1, r2}
+ stcopr16 r1, r2, HTTBR_64
+ ldm r0, {r1, r2, r3}
+ stcopr r1, HTCR
+ stcopr r2, HVBAR
+
+ /*
+ * TLB invalidations need to be completed before enabling MMU
+	 * TLB invalidations need to be completed before enabling the MMU
+ dsb nsh
+ stcopr r3, HSCTLR
+ /* Ensure the MMU enable takes effect immediately */
+ isb
+
+ mov r0, r4
+ ldr r2, [r0, #SUSPEND_CTX_SP_OFFSET]
+ mov sp, r2
+ ldr r1, [r0, #SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET]
+ cmp r1, #0
+ beq skip_sys_restore
+ bl tftf_restore_system_ctx
+skip_sys_restore:
+ pop {r4 - r12, lr}
+ mov r0, #PSCI_E_SUCCESS
+ bx lr
+endfunc __tftf_cpu_resume_ep
diff --git a/lib/power_management/suspend/aarch64/asm_tftf_suspend.S b/lib/power_management/suspend/aarch64/asm_tftf_suspend.S
new file mode 100644
index 0000000..692bade
--- /dev/null
+++ b/lib/power_management/suspend/aarch64/asm_tftf_suspend.S
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <psci.h>
+#include "../suspend_private.h"
+
+ .global __tftf_suspend
+ .global __tftf_save_arch_context
+ .global __tftf_cpu_resume_ep
+
+ .section .text, "ax"
+
+/*
+ * Saves the CPU state before entering suspend: the callee-saved registers
+ * are pushed on the stack, and stack space is allocated for the
+ * CPU-specific registers to be restored when coming out of suspend.
+ *
+ * x0 contains a pointer to a tftf_suspend_context structure.
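+ *
+ * Stack layout after the prologue (SUSPEND_CTX_SZ is 64 bytes, as defined
+ * in suspend_private.h):
+ *   [sp, #0]  - [sp, #63]  : tftf_suspend_ctx for the resume path
+ *   [sp, #64] - [sp, #159] : saved x29, x30 and x19-x28 (96-byte frame)
+ * The address of the saved-register frame is stored in the context at
+ * SUSPEND_CTX_SP_OFFSET.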
+ */
+func __tftf_suspend
+ stp x29, x30, [sp, #-96]!
+ stp x19, x20, [sp, #16]
+ stp x21, x22, [sp, #32]
+ stp x23, x24, [sp, #48]
+ stp x25, x26, [sp, #64]
+ stp x27, x28, [sp, #80]
+ mov x2, sp
+ sub sp, sp, #SUSPEND_CTX_SZ
+ mov x1, sp
+ /*
+	 * x1 now points to the tftf_suspend_context structure allocated on the stack
+ */
+ str x2, [x1, #SUSPEND_CTX_SP_OFFSET]
+ bl tftf_enter_suspend
+
+ /*
+ * If execution reaches this point, the suspend call was either
+	 * a suspend-to-standby call or an invalid suspend call.
+	 * In case of a suspend to powerdown, execution will instead resume in
+ * __tftf_cpu_resume_ep().
+ */
+ add sp, sp, #SUSPEND_CTX_SZ
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, x30, [sp], #96
+ ret
+endfunc __tftf_suspend
+
+func __tftf_save_arch_context
+ JUMP_EL1_OR_EL2 x1, 1f, 2f, dead
+1: mrs x1, mair_el1
+ mrs x2, cpacr_el1
+ mrs x3, ttbr0_el1
+ mrs x4, tcr_el1
+ mrs x5, vbar_el1
+ mrs x6, sctlr_el1
+ stp x1, x2, [x0]
+ stp x3, x4, [x0, #16]
+ stp x5, x6, [x0, #32]
+ ret
+
+2: mrs x1, mair_el2
+ mrs x2, hcr_el2
+ mrs x3, ttbr0_el2
+ mrs x4, tcr_el2
+ mrs x5, vbar_el2
+ mrs x6, sctlr_el2
+ stp x1, x2, [x0]
+ stp x3, x4, [x0, #16]
+ stp x5, x6, [x0, #32]
+ ret
+endfunc __tftf_save_arch_context
+
+/*
+ * Restore the CPU register context.
+ * x0 must contain the context pointer.
+ */
+func __tftf_cpu_resume_ep
+ JUMP_EL1_OR_EL2 x1, 1f, 2f, dead
+1:	/* Invalidate local TLB entries before turning on the MMU */
+ tlbi vmalle1
+ ldp x1, x2, [x0]
+ ldp x3, x4, [x0, #16]
+ ldp x5, x6, [x0, #32]
+ msr mair_el1, x1
+ msr cpacr_el1, x2
+ msr ttbr0_el1, x3
+ msr tcr_el1, x4
+ msr vbar_el1, x5
+ /*
+	 * TLB invalidations need to be completed before enabling the MMU
+ */
+ dsb nsh
+ msr sctlr_el1, x6
+ /* Ensure the MMU enable takes effect immediately */
+ isb
+ b restore_callee_regs
+
+	/* Invalidate local TLB entries before turning on the MMU */
+2: tlbi alle2
+ ldp x1, x2, [x0]
+ ldp x3, x4, [x0, #16]
+ ldp x5, x6, [x0, #32]
+ msr mair_el2, x1
+ msr hcr_el2, x2
+ msr ttbr0_el2, x3
+ msr tcr_el2, x4
+ msr vbar_el2, x5
+ /*
+ * TLB invalidations need to be completed before enabling MMU
+	 * TLB invalidations need to be completed before enabling the MMU
+ dsb nsh
+ msr sctlr_el2, x6
+ /* Ensure the MMU enable takes effect immediately */
+ isb
+
+restore_callee_regs:
+ ldr x2, [x0, #SUSPEND_CTX_SP_OFFSET]
+ mov sp, x2
+ ldr w1, [x0, #SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET]
+ cbz w1, skip_sys_restore
+ bl tftf_restore_system_ctx
+skip_sys_restore:
+ ldp x19, x20, [sp, #16] /* Restore the callee saved registers */
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, x30, [sp], #96
+ mov x0, PSCI_E_SUCCESS
+ ret
+endfunc __tftf_cpu_resume_ep
+
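+/* Fallback for JUMP_EL1_OR_EL2: spin forever if not running at EL1 or EL2 */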
+dead:
+ b .
diff --git a/lib/power_management/suspend/suspend_private.h b/lib/power_management/suspend/suspend_private.h
new file mode 100644
index 0000000..dfc2e93
--- /dev/null
+++ b/lib/power_management/suspend/suspend_private.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SUSPEND_PRIV_H__
+#define __SUSPEND_PRIV_H__
+
+#define SUSPEND_CTX_SZ 64
+#define SUSPEND_CTX_SP_OFFSET 48
+#define SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET 56
+
+#ifndef __ASSEMBLY__
+#include <cassert.h>
+#include <power_management.h>
+#include <stdint.h>
+#include <string.h>
+#include <types.h>
+
+#define NR_CTX_REGS 6
+
+/*
+ * struct tftf_suspend_context represents the architecture context to
+ * be saved and restored while entering suspend and coming out of it.
+ * It must be 16-byte aligned since it is allocated on the stack, which must
+ * be 16-byte aligned on ARMv8 (AArch64). Although the alignment requirement
+ * does not exist in AArch32, we use the same alignment and register width,
+ * as this allows the same structure to be reused for AArch32.
+ */
+typedef struct tftf_suspend_context {
+ uint64_t arch_ctx_regs[NR_CTX_REGS];
+ uint64_t stack_pointer;
+ /*
+	 * Whether the system context is saved and needs to be restored.
+ * Note that the system context itself is not saved in this structure.
+ */
+ unsigned int save_system_context;
+} __aligned(16) tftf_suspend_ctx_t;
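+
+/*
+ * With NR_CTX_REGS == 6, this layout matches the offset #defines above:
+ * arch_ctx_regs occupies bytes 0-47, stack_pointer starts at byte 48,
+ * save_system_context at byte 56, and the 16-byte alignment pads the
+ * structure to its 64-byte size.
+ */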
+
+/*
+ * Saves the callee-saved registers on the stack, allocates stack space for
+ * the CPU context registers and enters suspend by calling
+ * tftf_enter_suspend().
+ * power_state: PSCI power state to be sent via SMC
+ * Returns: PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS
+ */
+unsigned int __tftf_suspend(const suspend_info_t *power_state);
+
+/*
+ * Saves the architecture context of the CPU in memory.
+ * ctx: Pointer to the location where the context is saved
+ */
+void __tftf_save_arch_context(struct tftf_suspend_context *ctx);
+
+/*
+ * Calls __tftf_save_arch_context() to save the architecture context of the
+ * CPU to the memory pointed to by ctx, then enters suspend by issuing the
+ * SMC.
+ * power_state: PSCI power state to be sent via SMC
+ * ctx: Pointer to the location where suspend context can be stored
+ * Returns: PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS
+ */
+int32_t tftf_enter_suspend(const suspend_info_t *power_state,
+ tftf_suspend_ctx_t *ctx);
+
+/*
+ * Invokes the appropriate driver functions in the TFTF framework
+ * to save their context prior to a system suspend.
+ */
+void tftf_save_system_ctx(tftf_suspend_ctx_t *ctx);
+
+/*
+ * Invokes the appropriate driver functions in the TFTF framework
+ * to restore their context on wake-up from system suspend.
+ */
+void tftf_restore_system_ctx(tftf_suspend_ctx_t *ctx);
+
+/*
+ * Restores the CPU architecture context and callee-saved registers from the
+ * location pointed to by x0 (the context ID).
+ * Returns: PSCI_E_SUCCESS
+ */
+unsigned int __tftf_cpu_resume_ep(void);
+
+/*
+ * Compile-time asserts to verify that the offset #defines match the
+ * structure layout as seen by the compiler.
+ */
+CASSERT(SUSPEND_CTX_SZ == sizeof(tftf_suspend_ctx_t),
+ assert_suspend_context_size_mismatch);
+CASSERT(SUSPEND_CTX_SP_OFFSET == __builtin_offsetof(tftf_suspend_ctx_t, stack_pointer),
+ assert_stack_pointer_location_mismatch_in_suspend_ctx);
+CASSERT(SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET ==
+ __builtin_offsetof(tftf_suspend_ctx_t, save_system_context),
+ assert_save_sys_ctx_mismatch_in_suspend_ctx);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __SUSPEND_PRIV_H__ */
diff --git a/lib/power_management/suspend/tftf_suspend.c b/lib/power_management/suspend/tftf_suspend.c
new file mode 100644
index 0000000..75c2ade
--- /dev/null
+++ b/lib/power_management/suspend/tftf_suspend.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdint.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+#include "suspend_private.h"
+
+int32_t tftf_enter_suspend(const suspend_info_t *info,
+ tftf_suspend_ctx_t *ctx)
+{
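+	/*
+	 * Per the PSCI specification, CPU_SUSPEND takes (power_state,
+	 * entrypoint, context_id) whereas SYSTEM_SUSPEND only takes
+	 * (entrypoint, context_id), hence the two argument layouts below.
+	 * The first element of each smc_args structure is the PSCI
+	 * function ID.
+	 */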
+ smc_args cpu_suspend_args = {
+ info->psci_api,
+ info->power_state,
+ (uintptr_t)__tftf_cpu_resume_ep,
+ (u_register_t)ctx
+ };
+
+ smc_args system_suspend_args = {
+ info->psci_api,
+ (uintptr_t)__tftf_cpu_resume_ep,
+ (u_register_t)ctx
+ };
+
+ smc_ret_values rc;
+
+ if (info->save_system_context) {
+ ctx->save_system_context = 1;
+ tftf_save_system_ctx(ctx);
+	} else {
+		ctx->save_system_context = 0;
+	}
+
+ /*
+	 * Save the CPU context. It will be restored in the resume path in
+ * __tftf_cpu_resume_ep().
+ */
+ __tftf_save_arch_context(ctx);
+
+ /*
+	 * Flush the context so that it can be retrieved with the MMU off.
+ */
+ flush_dcache_range((u_register_t)ctx, sizeof(*ctx));
+
+ if (info->psci_api == SMC_PSCI_CPU_SUSPEND)
+ rc = tftf_smc(&cpu_suspend_args);
+ else
+ rc = tftf_smc(&system_suspend_args);
+
+ /*
+	 * If execution reaches this point, the above SMC call was either an
+	 * invalid call or a suspend-to-standby call. In both cases the CPU
+	 * does not power down, so there is no need to restore the context.
+ */
+ return rc.ret0;
+}
+
+void tftf_restore_system_ctx(tftf_suspend_ctx_t *ctx)
+{
+ assert(ctx != NULL);
+ assert(ctx->save_system_context);
+
+ /*
+ * TODO: Check if there is a need for separate platform
+ * API for resume.
+ */
+
+ tftf_early_platform_setup();
+
+ INFO("Restoring system context\n");
+
+ /* restore the global GIC context */
+ arm_gic_restore_context_global();
+ tftf_timer_gic_state_restore();
+}
+
+void tftf_save_system_ctx(tftf_suspend_ctx_t *ctx)
+{
+ assert(ctx != NULL);
+ assert(ctx->save_system_context);
+
+ /* Nothing to do here currently */
+ INFO("Saving system context\n");
+
+ /* Save the global GIC context */
+ arm_gic_save_context_global();
+}
+
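+/*
+ * Enter suspend with IRQs masked on this CPU and the local GIC context
+ * saved and restored around the call. A minimal usage sketch follows;
+ * suspend_info_t is defined in power_management.h, the field names are the
+ * ones dereferenced in tftf_enter_suspend() above and the power state
+ * encoding is platform-specific:
+ *
+ *	suspend_info_t info = {
+ *		.psci_api = SMC_PSCI_CPU_SUSPEND,
+ *		.power_state = <platform-specific power state>,
+ *		.save_system_context = 0,
+ *	};
+ *	int rc = tftf_suspend(&info);
+ */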
+int tftf_suspend(const suspend_info_t *info)
+{
+ int32_t rc;
+ uint64_t flags;
+
+ flags = read_daif();
+
+ disable_irq();
+
+ INFO("Going into suspend state\n");
+
+ /* Save the local GIC context */
+ arm_gic_save_context_local();
+
+ rc = __tftf_suspend(info);
+
+ /* Restore the local GIC context */
+ arm_gic_restore_context_local();
+
+ /*
+	 * The DAIF flags should be restored last, because unmasking
+	 * exceptions any earlier could be a problem, e.g. if the GIC must
+	 * be reconfigured upon resume from suspend.
+ */
+ write_daif(flags);
+
+ INFO("Resumed from suspend state\n");
+
+ return rc;
+}