Move context management code to common location

The upcoming Firmware Update (FWU) feature needs to transition across
the Secure and Normal worlds to complete the FWU process, and hence
requires the context management code to perform this task.

Currently the context management code is part of the BL31 stage only.
This patch moves the code from (include)/bl31 to (include)/common.
Some function declarations/definitions and macros have also been moved
to different files to facilitate code sharing.
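
With the code in a common location, other images can reuse the same
API to drive a world switch. A minimal, illustrative sequence (the
'next_image_ep' and 'security_state' names below are placeholders,
not part of this patch) would be:

    cm_init_context(read_mpidr_el1(), &next_image_ep);
    cm_el1_sysregs_context_restore(security_state);
    cm_set_next_eret_context(security_state);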

Change-Id: I3858b08aecdb76d390765ab2b099f457873f7b0c
diff --git a/include/common/bl_common.h b/include/common/bl_common.h
index 0eec989..d8741c9 100644
--- a/include/common/bl_common.h
+++ b/include/common/bl_common.h
@@ -89,6 +89,26 @@
 	(_p)->h.attr = (uint32_t)(_attr) ; \
 	} while (0)
 
+/*******************************************************************************
+ * Constants to indicate type of exception to the common exception handler.
+ ******************************************************************************/
+#define SYNC_EXCEPTION_SP_EL0		0x0
+#define IRQ_SP_EL0			0x1
+#define FIQ_SP_EL0			0x2
+#define SERROR_SP_EL0			0x3
+#define SYNC_EXCEPTION_SP_ELX		0x4
+#define IRQ_SP_ELX			0x5
+#define FIQ_SP_ELX			0x6
+#define SERROR_SP_ELX			0x7
+#define SYNC_EXCEPTION_AARCH64		0x8
+#define IRQ_AARCH64			0x9
+#define FIQ_AARCH64			0xa
+#define SERROR_AARCH64			0xb
+#define SYNC_EXCEPTION_AARCH32		0xc
+#define IRQ_AARCH32			0xd
+#define FIQ_AARCH32			0xe
+#define SERROR_AARCH32			0xf
+
 #ifndef __ASSEMBLY__
 #include <cdefs.h> /* For __dead2 */
 #include <cassert.h>
diff --git a/include/common/context.h b/include/common/context.h
new file mode 100644
index 0000000..0dfebe0
--- /dev/null
+++ b/include/common/context.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'gp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_GPREGS_OFFSET	0x0
+#define CTX_GPREG_X0		0x0
+#define CTX_GPREG_X1		0x8
+#define CTX_GPREG_X2		0x10
+#define CTX_GPREG_X3		0x18
+#define CTX_GPREG_X4		0x20
+#define CTX_GPREG_X5		0x28
+#define CTX_GPREG_X6		0x30
+#define CTX_GPREG_X7		0x38
+#define CTX_GPREG_X8		0x40
+#define CTX_GPREG_X9		0x48
+#define CTX_GPREG_X10		0x50
+#define CTX_GPREG_X11		0x58
+#define CTX_GPREG_X12		0x60
+#define CTX_GPREG_X13		0x68
+#define CTX_GPREG_X14		0x70
+#define CTX_GPREG_X15		0x78
+#define CTX_GPREG_X16		0x80
+#define CTX_GPREG_X17		0x88
+#define CTX_GPREG_X18		0x90
+#define CTX_GPREG_X19		0x98
+#define CTX_GPREG_X20		0xa0
+#define CTX_GPREG_X21		0xa8
+#define CTX_GPREG_X22		0xb0
+#define CTX_GPREG_X23		0xb8
+#define CTX_GPREG_X24		0xc0
+#define CTX_GPREG_X25		0xc8
+#define CTX_GPREG_X26		0xd0
+#define CTX_GPREG_X27		0xd8
+#define CTX_GPREG_X28		0xe0
+#define CTX_GPREG_X29		0xe8
+#define CTX_GPREG_LR		0xf0
+#define CTX_GPREG_SP_EL0	0xf8
+#define CTX_GPREGS_END		0x100
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'el3_state'
+ * structure at their correct offsets. Note that some of the registers are only
+ * 32 bits wide but are stored as 64-bit values for convenience.
+ ******************************************************************************/
+#define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_SCR_EL3		0x0
+#define CTX_RUNTIME_SP		0x8
+#define CTX_SPSR_EL3		0x10
+#define CTX_ELR_EL3		0x18
+#define CTX_EL3STATE_END	0x20
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the
+ * 'el1_sys_regs' structure at their correct offsets. Note that some of the
+ * registers are only 32 bits wide but are stored as 64-bit values for
+ * convenience.
+ ******************************************************************************/
+#define CTX_SYSREGS_OFFSET	(CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
+#define CTX_SPSR_EL1		0x0
+#define CTX_ELR_EL1		0x8
+#define CTX_SPSR_ABT		0x10
+#define CTX_SPSR_UND		0x18
+#define CTX_SPSR_IRQ		0x20
+#define CTX_SPSR_FIQ		0x28
+#define CTX_SCTLR_EL1		0x30
+#define CTX_ACTLR_EL1		0x38
+#define CTX_CPACR_EL1		0x40
+#define CTX_CSSELR_EL1		0x48
+#define CTX_SP_EL1		0x50
+#define CTX_ESR_EL1		0x58
+#define CTX_TTBR0_EL1		0x60
+#define CTX_TTBR1_EL1		0x68
+#define CTX_MAIR_EL1		0x70
+#define CTX_AMAIR_EL1		0x78
+#define CTX_TCR_EL1		0x80
+#define CTX_TPIDR_EL1		0x88
+#define CTX_TPIDR_EL0		0x90
+#define CTX_TPIDRRO_EL0		0x98
+#define CTX_DACR32_EL2		0xa0
+#define CTX_IFSR32_EL2		0xa8
+#define CTX_PAR_EL1		0xb0
+#define CTX_FAR_EL1		0xb8
+#define CTX_AFSR0_EL1		0xc0
+#define CTX_AFSR1_EL1		0xc8
+#define CTX_CONTEXTIDR_EL1	0xd0
+#define CTX_VBAR_EL1		0xd8
+/*
+ * If the timer registers aren't saved and restored, we don't have to reserve
+ * space for them in the context
+ */
+#if NS_TIMER_SWITCH
+#define CTX_CNTP_CTL_EL0	0xe0
+#define CTX_CNTP_CVAL_EL0	0xe8
+#define CTX_CNTV_CTL_EL0	0xf0
+#define CTX_CNTV_CVAL_EL0	0xf8
+#define CTX_CNTKCTL_EL1		0x100
+#define CTX_FP_FPEXC32_EL2	0x108
+#define CTX_SYSREGS_END		0x110
+#else
+#define CTX_FP_FPEXC32_EL2	0xe0
+#define CTX_SYSREGS_END		0xf0
+#endif
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'fp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#if CTX_INCLUDE_FPREGS
+#define CTX_FPREGS_OFFSET	(CTX_SYSREGS_OFFSET + CTX_SYSREGS_END)
+#define CTX_FP_Q0		0x0
+#define CTX_FP_Q1		0x10
+#define CTX_FP_Q2		0x20
+#define CTX_FP_Q3		0x30
+#define CTX_FP_Q4		0x40
+#define CTX_FP_Q5		0x50
+#define CTX_FP_Q6		0x60
+#define CTX_FP_Q7		0x70
+#define CTX_FP_Q8		0x80
+#define CTX_FP_Q9		0x90
+#define CTX_FP_Q10		0xa0
+#define CTX_FP_Q11		0xb0
+#define CTX_FP_Q12		0xc0
+#define CTX_FP_Q13		0xd0
+#define CTX_FP_Q14		0xe0
+#define CTX_FP_Q15		0xf0
+#define CTX_FP_Q16		0x100
+#define CTX_FP_Q17		0x110
+#define CTX_FP_Q18		0x120
+#define CTX_FP_Q19		0x130
+#define CTX_FP_Q20		0x140
+#define CTX_FP_Q21		0x150
+#define CTX_FP_Q22		0x160
+#define CTX_FP_Q23		0x170
+#define CTX_FP_Q24		0x180
+#define CTX_FP_Q25		0x190
+#define CTX_FP_Q26		0x1a0
+#define CTX_FP_Q27		0x1b0
+#define CTX_FP_Q28		0x1c0
+#define CTX_FP_Q29		0x1d0
+#define CTX_FP_Q30		0x1e0
+#define CTX_FP_Q31		0x1f0
+#define CTX_FP_FPSR		0x200
+#define CTX_FP_FPCR		0x208
+#define CTX_FPREGS_END		0x210
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <platform_def.h>	/* for CACHE_WRITEBACK_GRANULE */
+#include <stdint.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */
+#define DWORD_SHIFT		3
+#define DEFINE_REG_STRUCT(name, num_regs)	\
+	typedef struct name {			\
+		uint64_t _regs[num_regs];	\
+	}  __aligned(16) name##_t
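+
+/*
+ * For example, DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL) declares a 16-byte
+ * aligned 'gp_regs_t' type containing CTX_GPREG_ALL 64-bit register slots.
+ */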
+
+/* Constants to determine the size of individual context structures */
+#define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
+#define CTX_SYSREG_ALL		(CTX_SYSREGS_END >> DWORD_SHIFT)
+#if CTX_INCLUDE_FPREGS
+#define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
+#endif
+#define CTX_EL3STATE_ALL	(CTX_EL3STATE_END >> DWORD_SHIFT)
+
+/*
+ * AArch64 general purpose register context structure. Usually only x0-x18
+ * and lr are saved, as the compiler is expected to preserve the remaining
+ * callee-saved registers if they are used by the C runtime, and the
+ * assembler does not touch them. However, in the case of a world switch
+ * during exception handling, the callee-saved registers need to be saved too.
+ */
+DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
+
+/*
+ * AArch64 EL1 system register context structure for preserving the
+ * architectural state during switches from one security state to
+ * another in EL1.
+ */
+DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL);
+
+/*
+ * AArch64 floating point register context structure for preserving
+ * the floating point state during switches from one security state to
+ * another.
+ */
+#if CTX_INCLUDE_FPREGS
+DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
+#endif
+
+/*
+ * Miscellaneous registers used by EL3 firmware to maintain its state
+ * across exception entries and exits
+ */
+DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
+
+/*
+ * Macros to access members of any of the above structures using their
+ * offsets
+ */
+#define read_ctx_reg(ctx, offset)	((ctx)->_regs[(offset) >> DWORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[(offset) >> DWORD_SHIFT]) \
+					 = (val))
+
+/*
+ * Top-level context structure which is used by EL3 firmware to
+ * preserve the state of a core at EL1 in one of the two security
+ * states and save enough EL3 metadata to be able to return to that
+ * EL and security state. The context management library will be used
+ * to ensure that SP_EL3 always points to an instance of this
+ * structure at exception entry and exit. Each instance will
+ * correspond to either the secure or the non-secure state.
+ */
+typedef struct cpu_context {
+	gp_regs_t gpregs_ctx;
+	el3_state_t el3state_ctx;
+	el1_sys_regs_t sysregs_ctx;
+#if CTX_INCLUDE_FPREGS
+	fp_regs_t fpregs_ctx;
+#endif
+} cpu_context_t;
+
+/* Macros to access members of the 'cpu_context_t' structure */
+#define get_el3state_ctx(h)	(&((cpu_context_t *) h)->el3state_ctx)
+#if CTX_INCLUDE_FPREGS
+#define get_fpregs_ctx(h)	(&((cpu_context_t *) h)->fpregs_ctx)
+#endif
+#define get_sysregs_ctx(h)	(&((cpu_context_t *) h)->sysregs_ctx)
+#define get_gpregs_ctx(h)	(&((cpu_context_t *) h)->gpregs_ctx)
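+
+/*
+ * Illustrative usage of the accessors above ('ctx', 'entrypoint' and 'x0'
+ * are placeholders):
+ *
+ *   write_ctx_reg(get_el3state_ctx(ctx), CTX_ELR_EL3, entrypoint);
+ *   x0 = read_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0);
+ */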
+
+/*
+ * Compile-time assertions related to the 'cpu_context' structure, to
+ * ensure that the assembler's and the compiler's views of the offsets of
+ * the structure members are the same.
+ */
+CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \
+	assert_core_context_gp_offset_mismatch);
+CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \
+	assert_core_context_sys_offset_mismatch);
+#if CTX_INCLUDE_FPREGS
+CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \
+	assert_core_context_fp_offset_mismatch);
+#endif
+CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \
+	assert_core_context_el3state_offset_mismatch);
+
+/*
+ * Helper macros to set the general purpose registers that correspond to
+ * parameters in an AAPCS64 call, i.e. x0-x7.
+ */
+#define set_aapcs_args0(ctx, x0)				do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0);	\
+	} while (0);
+#define set_aapcs_args1(ctx, x0, x1)				do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1);	\
+		set_aapcs_args0(ctx, x0);				\
+	} while (0);
+#define set_aapcs_args2(ctx, x0, x1, x2)			do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2);	\
+		set_aapcs_args1(ctx, x0, x1);				\
+	} while (0);
+#define set_aapcs_args3(ctx, x0, x1, x2, x3)			do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3);	\
+		set_aapcs_args2(ctx, x0, x1, x2);			\
+	} while (0);
+#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4)		do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4);	\
+		set_aapcs_args3(ctx, x0, x1, x2, x3);			\
+	} while (0);
+#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5)		do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5);	\
+		set_aapcs_args4(ctx, x0, x1, x2, x3, x4);		\
+	} while (0);
+#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6)	do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6);	\
+		set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5);		\
+	} while (0);
+#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7)	do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7);	\
+		set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6);	\
+	} while (0);
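+
+/*
+ * For example (illustrative only; 'ctx', 'smc_fid', 'arg1' and 'arg2' are
+ * placeholders), passing three AAPCS arguments in x0-x2 of a context:
+ *
+ *   set_aapcs_args2(ctx, smc_fid, arg1, arg2);
+ */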
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+void el1_sysregs_context_save(el1_sys_regs_t *regs);
+void el1_sysregs_context_restore(el1_sys_regs_t *regs);
+#if CTX_INCLUDE_FPREGS
+void fpregs_context_save(fp_regs_t *regs);
+void fpregs_context_restore(fp_regs_t *regs);
+#endif
+
+
+#undef CTX_SYSREG_ALL
+#if CTX_INCLUDE_FPREGS
+#undef CTX_FPREG_ALL
+#endif
+#undef CTX_GPREG_ALL
+#undef CTX_EL3STATE_ALL
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __CONTEXT_H__ */
diff --git a/include/common/context_mgmt.h b/include/common/context_mgmt.h
new file mode 100644
index 0000000..141b348
--- /dev/null
+++ b/include/common/context_mgmt.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CM_H__
+#define __CM_H__
+
+#include <arch.h>
+#include <bl_common.h>
+#include <common_def.h>
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct entry_point_info;
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+void cm_init(void);
+void *cm_get_context_by_mpidr(uint64_t mpidr,
+			      uint32_t security_state) __warn_deprecated;
+void cm_set_context_by_mpidr(uint64_t mpidr,
+			     void *context,
+			     uint32_t security_state) __warn_deprecated;
+void *cm_get_context_by_index(unsigned int cpu_idx,
+			      unsigned int security_state);
+void cm_set_context_by_index(unsigned int cpu_idx,
+			     void *context,
+			     unsigned int security_state);
+void *cm_get_context(uint32_t security_state);
+void cm_set_context(void *context, uint32_t security_state);
+inline void cm_set_next_context(void *context);
+void cm_init_context(uint64_t mpidr,
+		     const struct entry_point_info *ep) __warn_deprecated;
+void cm_init_my_context(const struct entry_point_info *ep);
+void cm_init_context_by_index(unsigned int cpu_idx,
+			      const struct entry_point_info *ep);
+void cm_prepare_el3_exit(uint32_t security_state);
+void cm_el1_sysregs_context_save(uint32_t security_state);
+void cm_el1_sysregs_context_restore(uint32_t security_state);
+void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint);
+void cm_set_elr_spsr_el3(uint32_t security_state,
+			 uint64_t entrypoint, uint32_t spsr);
+void cm_write_scr_el3_bit(uint32_t security_state,
+			  uint32_t bit_pos,
+			  uint32_t value);
+void cm_set_next_eret_context(uint32_t security_state);
+uint32_t cm_get_scr_el3(uint32_t security_state);
+
+/* Inline definitions */
+
+/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return. It initializes SP_EL3 to point to a 'cpu_context' structure set up
+ * for the required security state.
+ ******************************************************************************/
+inline void cm_set_next_context(void *context)
+{
+#if DEBUG
+	uint64_t sp_mode;
+
+	/*
+	 * Check that this function is called with SP_EL0 as the stack
+	 * pointer
+	 */
+	__asm__ volatile("mrs	%0, SPSel\n"
+			 : "=r" (sp_mode));
+
+	assert(sp_mode == MODE_SP_EL0);
+#endif
+
+	__asm__ volatile("msr	spsel, #1\n"
+			 "mov	sp, %0\n"
+			 "msr	spsel, #0\n"
+			 : : "r" (context));
+}
+#endif /* __CM_H__ */
diff --git a/include/common/smcc_helpers.h b/include/common/smcc_helpers.h
new file mode 100644
index 0000000..6a07b01
--- /dev/null
+++ b/include/common/smcc_helpers.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SMCC_HELPERS_H__
+#define __SMCC_HELPERS_H__
+
+/*******************************************************************************
+ * Bit definitions inside the function id as per the SMC calling convention
+ ******************************************************************************/
+#define FUNCID_TYPE_SHIFT		31
+#define FUNCID_CC_SHIFT			30
+#define FUNCID_OEN_SHIFT		24
+#define FUNCID_NUM_SHIFT		0
+
+#define FUNCID_TYPE_MASK		0x1
+#define FUNCID_CC_MASK			0x1
+#define FUNCID_OEN_MASK			0x3f
+#define FUNCID_NUM_MASK			0xffff
+
+#define FUNCID_TYPE_WIDTH		1
+#define FUNCID_CC_WIDTH			1
+#define FUNCID_OEN_WIDTH		6
+#define FUNCID_NUM_WIDTH		16
+
+#define GET_SMC_CC(id)			((id >> FUNCID_CC_SHIFT) & \
+					 FUNCID_CC_MASK)
+#define GET_SMC_TYPE(id)		((id >> FUNCID_TYPE_SHIFT) & \
+					 FUNCID_TYPE_MASK)
+
+#define SMC_64				1
+#define SMC_32				0
+#define SMC_UNK				0xffffffff
+#define SMC_TYPE_FAST			1
+#define SMC_TYPE_STD			0
+#define SMC_PREEMPTED			0xfffffffe
+
+/*******************************************************************************
+ * Owning entity number definitions inside the function id as per the SMC
+ * calling convention
+ ******************************************************************************/
+#define OEN_ARM_START			0
+#define OEN_ARM_END			0
+#define OEN_CPU_START			1
+#define OEN_CPU_END			1
+#define OEN_SIP_START			2
+#define OEN_SIP_END			2
+#define OEN_OEM_START			3
+#define OEN_OEM_END			3
+#define OEN_STD_START			4	/* Standard Calls */
+#define OEN_STD_END			4
+#define OEN_TAP_START			48	/* Trusted Applications */
+#define OEN_TAP_END			49
+#define OEN_TOS_START			50	/* Trusted OS */
+#define OEN_TOS_END			63
+#define OEN_LIMIT			64
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <context.h>
+#include <stdint.h>
+
+/* Various flags passed to SMC handlers */
+#define SMC_FROM_SECURE		(0 << 0)
+#define SMC_FROM_NON_SECURE	(1 << 0)
+
+#define is_caller_non_secure(_f)	(!!(_f & SMC_FROM_NON_SECURE))
+#define is_caller_secure(_f)		(!(is_caller_non_secure(_f)))
+
+/* Convenience macros to return from SMC handler */
+#define SMC_RET0(_h)	{ \
+	return (uint64_t) (_h);		\
+}
+#define SMC_RET1(_h, _x0)	{ \
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
+	SMC_RET0(_h);						\
+}
+#define SMC_RET2(_h, _x0, _x1)	{ \
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
+	SMC_RET1(_h, (_x0)); \
+}
+#define SMC_RET3(_h, _x0, _x1, _x2)	{ \
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \
+	SMC_RET2(_h, (_x0), (_x1)); \
+}
+#define SMC_RET4(_h, _x0, _x1, _x2, _x3)	{ \
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
+	SMC_RET3(_h, (_x0), (_x1), (_x2)); \
+}
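+
+/*
+ * For example (illustrative; 'handle', 'status' and 'result' are
+ * placeholders), a handler returning two values in x0 and x1 would
+ * end with:
+ *
+ *   SMC_RET2(handle, status, result);
+ */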
+
+/*
+ * Convenience macros to access general purpose registers using the handle
+ * provided to the SMC handler. These take the offset values defined in
+ * context.h.
+ */
+#define SMC_GET_GP(_h, _g) \
+	read_ctx_reg(get_gpregs_ctx(_h), (_g));
+#define SMC_SET_GP(_h, _g, _v) \
+	write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v));
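+
+/*
+ * e.g. (illustrative; 'handle' is the context handle passed to the SMC
+ * handler):
+ *
+ *   uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
+ */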
+
+/*
+ * Convenience macros to access EL3 context registers using the handle provided
+ * to the SMC handler. These take the offset values defined in context.h.
+ */
+#define SMC_GET_EL3(_h, _e) \
+	read_ctx_reg(get_el3state_ctx(_h), (_e));
+#define SMC_SET_EL3(_h, _e, _v) \
+	write_ctx_reg(get_el3state_ctx(_h), (_e), (_v));
+
+/* The macro below is used to identify a Standard Service SMC call */
+#define is_std_svc_call(_fid)		((((_fid) >> FUNCID_OEN_SHIFT) & \
+					   FUNCID_OEN_MASK) == OEN_STD_START)
+
+/* The macro below is used to identify a valid Fast SMC call */
+#define is_valid_fast_smc(_fid)		((!(((_fid) >> 16) & 0xff)) && \
+					   (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
+
+/*
+ * Macro to define UUID for services. Apart from defining and initializing a
+ * uuid_t structure, this macro verifies that the first word of the defined UUID
+ * does not equal SMC_UNK. This is to ensure that the caller won't mistake the
+ * returned UUID in x0 for an invalid SMC error return
+ */
+#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
+		_n0, _n1, _n2, _n3, _n4, _n5) \
+	CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\
+	static const uuid_t _name = { \
+		_tl, _tm, _th, _cl, _ch, \
+		{ _n0, _n1, _n2, _n3, _n4, _n5 } \
+	}
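+
+/*
+ * Illustrative use (the name and numeric fields below are placeholder
+ * values, not a real service UUID):
+ *
+ *   DEFINE_SVC_UUID(my_svc_uuid,
+ *       0x12345678, 0x9abc, 0xdef0, 0x01, 0x23,
+ *       0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);
+ */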
+
+/* Return a UUID in the SMC return registers */
+#define SMC_UUID_RET(_h, _uuid) \
+	SMC_RET4((_h), ((const uint32_t *) &(_uuid))[0], \
+			 ((const uint32_t *) &(_uuid))[1], \
+			 ((const uint32_t *) &(_uuid))[2], \
+			 ((const uint32_t *) &(_uuid))[3])
+
+#endif /*__ASSEMBLY__*/
+#endif /* __SMCC_HELPERS_H__ */