TF-RMM Release v0.1.0

This is the first external release of TF-RMM. It provides a reference
implementation of the Realm Management Monitor (RMM), as specified by
the RMM Beta0 specification [1].

`docs/readme.rst` has more details about the project, and
`docs/getting_started/getting-started.rst` describes how to get
started with TF-RMM.

[1] https://developer.arm.com/documentation/den0137/1-0bet0/?lang=en

Signed-off-by: Soby Mathew <soby.mathew@arm.com>
Change-Id: I205ef14c015e4a37ae9ae1a64e4cd22eb8da746e
diff --git a/runtime/core/aarch64/entry.S b/runtime/core/aarch64/entry.S
new file mode 100644
index 0000000..5b557d6
--- /dev/null
+++ b/runtime/core/aarch64/entry.S
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <asm_macros.S>
+#include <smc.h>
+
+.globl rmm_handler
+
+func rmm_handler
+	/*
+	 * Save the Link Register and X4. As per SMCCC v1.2, the value
+	 * of X4 must be preserved unless it contains a result, as
+	 * specified in the function definition.
+	 */
+	stp	x4, lr, [sp, #-16]!
+
+	/*
+	 * Zero the space for X0-X3 in the smc_result structure
+	 * and pass its address as the last argument.
+	 */
+	stp	xzr, xzr, [sp, #-16]!
+	stp	xzr, xzr, [sp, #-16]!
+	mov	x7, sp
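+
+	/*
+	 * handle_ns_smc() takes the SMC FID in X0, its arguments in
+	 * X1-X6 and the address of the smc_result buffer in X7. Note
+	 * that the X4 value saved above sits directly after the four
+	 * zeroed slots, so it provides the default result for X4:
+	 * per SMCCC v1.2, X4 is preserved unless a handler
+	 * overwrites it.
+	 */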
+
+	bl	handle_ns_smc
+
+	/*
+	 * Copy the command output values back to the caller. Since this
+	 * is done through an SMC, X0 is used as the FID, and X1-X5 contain
+	 * the values of X0-X4 copied from the smc_result structure.
+	 */
+	ldr	x0, =SMC_RMM_REQ_COMPLETE
+	ldp	x1, x2, [sp], #16
+	ldp	x3, x4, [sp], #16
+	ldp	x5, lr, [sp], #16
+
+	smc	#0
+
+	/* Continue the rmm handling loop */
+	b	rmm_handler
+endfunc rmm_handler
diff --git a/runtime/core/aarch64/head.S b/runtime/core/aarch64/head.S
new file mode 100644
index 0000000..16d3dac
--- /dev/null
+++ b/runtime/core/aarch64/head.S
@@ -0,0 +1,175 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <rmm_el3_ifc.h>
+#include <sizes.h>
+#include <smc.h>
+#include <xlat_tables.h>
+
+#define RMM_STACK_SIZE		(SZ_4K * RMM_NUM_PAGES_PER_STACK)
+
+.globl rmm_entry
+
+/*
+ * Initialize essential R-EL2 sysregs and C runtime environment
+ */
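+/*
+ * Arguments:
+ * _vector            - Exception vector table to install in VBAR_EL2.
+ * _is_cold_boot_flag - Symbol holding the cold boot flag (see
+ *                      cold_boot_flag below).
+ * _warm_boot         - Label to branch to on the warm boot path.
+ */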
+.macro rmm_el2_init_env _vector, _is_cold_boot_flag, _warm_boot
+
+	/*
+	 * Stash arguments from previous boot stage
+	 */
+	mov	x20, x0
+	mov	x21, x1
+	mov	x22, x2
+	mov	x23, x3
+
+	mov_imm	x1, SCTLR_EL2_INIT
+	msr	sctlr_el2, x1
+
+	mov_imm	x2, HCR_EL2_INIT
+	msr	hcr_el2, x2
+
+	mov_imm	x3, CPTR_EL2_INIT
+	msr	cptr_el2, x3
+
+	mov_imm	x4, ICC_SRE_EL2_INIT
+	msr	ICC_SRE_EL2, x4
+
+	isb
+
+	ldr	x1, \_is_cold_boot_flag
+	cbz	x1, 1f
+
+	/*
+	 * As PIE is enabled, fix up the Global Offset Table (GOT) and
+	 * the dynamic relocations only once, during cold boot. This is
+	 * needed before accessing any symbol addresses.
+	 */
+	bl	fixup_gdt_reloc
+
+	/* Cold and warm boot need to go through this path */
+1:
+	/* Early validate and init CPU Id */
+	mov	x0, x20
+	bl	rmm_el3_ifc_validate_cpuid
+
+	/* Set up the stack for this CPU. X0 already contains the CPU Id */
+	bl	rmm_get_my_stack
+	mov	sp, x0
+
+	/*
+	 * Set up the exception vectors
+	 */
+	adrp	x3, \_vector
+	add	x3, x3, :lo12:\_vector
+	msr	vbar_el2, x3
+	isb
+
+	/*
+	 * Find out whether this is a cold or warm boot
+	 */
+	ldr	x1, \_is_cold_boot_flag
+	cbnz	x1, 2f
+
+	/*
+	 * Restore arguments in preparation for the warm boot path
+	 */
+	mov	x0, x20
+	mov	x1, x21
+	mov	x2, x22
+	mov	x3, x23
+	b	\_warm_boot
+
+2:
+	/*
+	 * Update cold boot flag to indicate cold boot is done
+	 */
+	adr	x2, \_is_cold_boot_flag
+	str	xzr, [x2]
+
+	/*
+	 * Initialize BSS section
+	 */
+	adrp	x0, bss_start
+	add	x0, x0, :lo12:bss_start
+	adrp	x1, bss_end
+	add	x1, x1, :lo12:bss_end
+	sub	x2, x1, x0
+	mov	x1, xzr
+	bl	memset
+
+	/*
+	 * Restore args received from previous BL image
+	 */
+	mov	x0, x20
+	mov	x1, x21
+	mov	x2, x22
+	mov	x3, x23
+.endm
+
+/*
+ * This is the main entry point for both the primary and secondary PEs.
+ */
+func rmm_entry
+
+	rmm_el2_init_env el2_vectors, cold_boot_flag, skip_to_warmboot
+
+	/*
+	 * Initialize platform-specific peripherals, such as the UART,
+	 * and the xlat tables.
+	 */
+	bl	plat_setup
+	bl	xlat_enable_mmu_el2
+
+	bl	rmm_main
+	b	smc_ret
+
+skip_to_warmboot:
+	/*
+	 * Carry on with the rest of the RMM warm boot path
+	 */
+	bl	plat_warmboot_setup
+	bl	xlat_enable_mmu_el2
+
+	bl	rmm_warmboot_main
+smc_ret:
+	mov_imm	x0, SMC_RMM_BOOT_COMPLETE
+	mov_imm	x1, E_RMM_BOOT_SUCCESS
+	smc	#0
+
+	/* Jump to the SMC handler post-init */
+	b	rmm_handler
+
+	/*
+	 * Flag to indicate whether this is a cold boot.
+	 * 1: cold boot, 0: warm boot.
+	 */
+.align 3
+cold_boot_flag:
+	.dword		1
+endfunc rmm_entry
+
+/*
+ * Return the top of the stack for a given PE index passed in x0.
+ * stack_start				     stack_end
+ *       o--sz---o....o--sz---o--sz---o--sz---o
+ *       ^\_____/^....^\_____/^\_____/^\_____/^
+ * id = (MAX_CPUS-1)     2       1       0
+ * Arg : x0 - CPU position
+ * sz  : RMM_STACK_SIZE bytes per CPU.
+ */
+func rmm_get_my_stack
+#ifndef NDEBUG
+	cmp	x0, #MAX_CPUS
+	ASM_ASSERT lo
+#endif
+	adrp	x1, stack_end
+	add	x1, x1, :lo12:stack_end
+	mov	x2, #(RMM_STACK_SIZE)	/* stack size per CPU */
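+	/* x0 = stack_end - (CPU Id * stack size) = this CPU's stack top */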
+	umsubl	x0, w0, w2, x1
+	ret
+endfunc rmm_get_my_stack
diff --git a/runtime/core/aarch64/helpers.S b/runtime/core/aarch64/helpers.S
new file mode 100644
index 0000000..55bfc97
--- /dev/null
+++ b/runtime/core/aarch64/helpers.S
@@ -0,0 +1,127 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <xlat_defs.h>
+
+	.globl	fixup_gdt_reloc
+
+/* ---------------------------------------------------------------------------
+ * Helper to fix up the Global Offset Table (GOT) and dynamic relocations
+ * (.rela.dyn) at runtime.
+ *
+ * This function is meant to be used when the firmware is compiled with -fpie
+ * and linked with -pie options. We rely on the linker script exporting
+ * appropriate markers for start and end of the section. For Global Offset
+ * Table (GOT), we expect 'rmm_got_start' and 'rmm_got_end' symbols to be
+ * defined. Similarly for *.rela.dyn, we expect rmm_rela_start and rmm_rela_end
+ * to be defined. We also expect `rmm_base` and `rmm_end` symbols to be
+ * defined by the linker script and are 4KB aligned. The RMM should be
+ * statically linked to start at 0x0.
+ *
+ * Clobber list: x0 to x7.
+ * ---------------------------------------------------------------------------
+ */
+
+/* Relocation codes */
+#define	R_AARCH64_NONE		0
+#define	R_AARCH64_RELATIVE	1027
+
+func fixup_gdt_reloc
+	/* Lower Limit for fixup */
+	mov	x0, xzr
+	/* rmm_base and rmm_end are 4KB aligned hence adrp is enough */
+	adrp	x2, rmm_base
+	adrp	x1, rmm_end
+	/* Upper Limit for fixup (rmm_end - rmm_base) */
+	sub	x1, x1, x2
+
+	/*
+	 * Since RMM will be compiled to start at 0x0, the current
+	 * PC-relative `rmm_base` loaded in x2 will be the Diff(S)
+	 * to be applied to the fixups.
+	 */
+	cbz	x2, 4f	/* Diff(S) = 0. No relocation needed */
+
+	adrp	x6, rmm_got_start
+	add	x6, x6, :lo12:rmm_got_start
+	adrp	x7, rmm_got_end
+	add	x7, x7, :lo12:rmm_got_end
+
+	/*
+	 * The GOT is an array of 64-bit addresses which must be fixed up
+	 * as new_addr = old_addr + Diff(S), where new_addr is the address
+	 * the binary is currently executing from and old_addr is the
+	 * address at compile time.
+	 */
+1:	ldr	x3, [x6]
+	/* Skip adding offset if address is < lower limit */
+	cmp	x3, x0
+	b.lo	2f
+
+	/* Skip adding offset if address is > upper limit */
+	cmp	x3, x1
+	b.hi	2f
+	add	x3, x3, x2
+	str	x3, [x6]
+
+2:	add	x6, x6, #8
+	cmp	x6, x7
+	b.lo	1b
+
+	/* Starting dynamic relocations */
+3:	adrp	x6, rmm_rela_start
+	add	x6, x6, :lo12:rmm_rela_start
+	adrp	x7, rmm_rela_end
+	add	x7, x7, :lo12:rmm_rela_end
+
+	/*
+	 * According to ELF-64 specification, the RELA data structure is as
+	 * follows:
+	 *	typedef struct {
+	 *		Elf64_Addr r_offset;
+	 *		Elf64_Xword r_info;
+	 *		Elf64_Sxword r_addend;
+	 *	} Elf64_Rela;
+	 *
+	 * r_offset is the address of the reference.
+	 * r_info is the symbol index and the type of relocation (in this
+	 * case code 1027, which corresponds to R_AARCH64_RELATIVE).
+	 * r_addend is the constant part of the expression.
+	 *
+	 * The size of the Elf64_Rela structure is 24 bytes.
+	 */
+
+1:	ldr	x3, [x6, #8]	/* r_info */
+	/* Skip R_AARCH64_NONE entry with code 0 */
+	cbz	x3, 2f
+
+#ifndef NDEBUG
+	/* Assert that the relocation type is R_AARCH64_RELATIVE */
+	cmp	x3, #R_AARCH64_RELATIVE
+	ASM_ASSERT eq
+#endif
+	ldr	x4, [x6, #16]	/* r_addend */
+
+	/* Skip adding offset if r_addend is < lower limit */
+	cmp	x4, x0
+	b.lo	2f
+
+	/* Skip adding offset if r_addend entry is > upper limit */
+	cmp	x4, x1
+	b.hi	2f
+
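+	/*
+	 * Apply the fixup: Diff(S) + r_addend is stored at the runtime
+	 * address of the reference, i.e. at r_offset + Diff(S).
+	 */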
+	ldr	x3, [x6]	/* r_offset */
+	add	x4, x4, x2	/* Diff(S) + r_addend */
+	str	x4, [x3, x2]
+
+2:	add	x6, x6, #24
+	cmp	x6, x7
+	b.lo	1b
+
+4:
+	ret
+endfunc fixup_gdt_reloc
diff --git a/runtime/core/aarch64/ns_access.S b/runtime/core/aarch64/ns_access.S
new file mode 100644
index 0000000..b39d2ec
--- /dev/null
+++ b/runtime/core/aarch64/ns_access.S
@@ -0,0 +1,79 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <asm_macros.S>
+
+.section ".text"
+
+/*
+ * The following addresses are registered with the exception handler:
+ */
+.global ns_read
+.global ns_write
+
+.global memcpy_ns_read
+.global memcpy_ns_write
+.global ns_access_ret_0
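+
+/*
+ * ns_read and ns_write mark the exact load/store instructions that can
+ * fault with a GPF when accessing NS memory. runtime/core/handler.c
+ * registers them in rmm_trap_list so that handle_rmm_trap() resumes
+ * execution at ns_access_ret_0 on such a fault.
+ */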
+
+/*
+ * Copy data from NS into Realm memory.
+ * The function returns 1 if the copy succeeds.
+ * If the access to the NS memory generates a GPF, the exception handler
+ * returns to ns_access_ret_0 and 0 is returned to the caller.
+ * In case of failure (when 0 is returned), partial data may have been
+ * written to the destination buffer.
+ *
+ * x0 - The address of the buffer in Realm memory to write into.
+ * x1 - The address of the buffer in NS memory to read from.
+ * x2 - The number of bytes to read.
+ * All arguments must be aligned to 8 bytes.
+ */
+func memcpy_ns_read
+	cbz	x2, 2f
+	mov	x3, #0
+1:
+ns_read:
+	ldr	x4, [x1], #8
+	str	x4, [x0], #8
+	add	x3, x3, #8
+	cmp	x3, x2
+	bne	1b
+2:
+	mov	x0, #1
+	ret
+endfunc memcpy_ns_read
+
+/*
+ * Copy data from Realm into NS memory.
+ * The function returns 1 if the copy succeeds.
+ * If the access to the NS memory generates a GPF, the exception handler
+ * returns to ns_access_ret_0 and 0 is returned to the caller.
+ * In case of failure (when 0 is returned), partial data may have been
+ * written to the destination buffer.
+ *
+ * x0 - The address of the buffer in NS memory to write into.
+ * x1 - The address of the buffer in Realm memory to read from.
+ * x2 - The number of bytes to write.
+ * All arguments must be aligned to 8 bytes.
+ */
+func memcpy_ns_write
+	cbz	x2, 2f
+	mov	x3, #0
+1:
+	ldr	x4, [x1], #8
+ns_write:
+	str	x4, [x0], #8
+	add	x3, x3, #8
+	cmp	x3, x2
+	bne	1b
+2:
+	mov	x0, #1
+	ret
+endfunc memcpy_ns_write
+
+func ns_access_ret_0
+	mov	x0, #0
+	ret
+endfunc ns_access_ret_0
diff --git a/runtime/core/aarch64/run-asm.S b/runtime/core/aarch64/run-asm.S
new file mode 100644
index 0000000..7024b0e
--- /dev/null
+++ b/runtime/core/aarch64/run-asm.S
@@ -0,0 +1,103 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <asm_macros.S>
+#include <rec.h>
+#include <sve.h>
+
+.globl run_realm
+.globl realm_exit
+
+/*
+ * int run_realm(unsigned long *regs);
+ *
+ * Per the AAPCS we must preserve x19-x29, along with the SP. We may freely
+ * corrupt x0-x18 and the flags, but need the LR to return to our caller.
+ */
+func run_realm
+	/* Push RMM registers to the stack */
+	sub	sp, sp, #(16 * 6)
+	stp	x19, x20, [sp, #(16 * 0)]
+	stp	x21, x22, [sp, #(16 * 1)]
+	stp	x23, x24, [sp, #(16 * 2)]
+	stp	x25, x26, [sp, #(16 * 3)]
+	stp	x27, x28, [sp, #(16 * 4)]
+	stp	x29, x30, [sp, #(16 * 5)]
+
+	/* Push rec pointer to the stack for realm_exit */
+	stp	x0, xzr, [sp, #-16]!
+
+	/* load realm GPRs (offsetof(rec, rec->regs[0]) == 0) */
+	ldp	x2,  x3,  [x0, #(16 * 1)]
+	ldp	x4,  x5,  [x0, #(16 * 2)]
+	ldp	x6,  x7,  [x0, #(16 * 3)]
+	ldp	x8,  x9,  [x0, #(16 * 4)]
+	ldp	x10, x11, [x0, #(16 * 5)]
+	ldp	x12, x13, [x0, #(16 * 6)]
+	ldp	x14, x15, [x0, #(16 * 7)]
+	ldp	x16, x17, [x0, #(16 * 8)]
+	ldp	x18, x19, [x0, #(16 * 9)]
+	ldp	x20, x21, [x0, #(16 * 10)]
+	ldp	x22, x23, [x0, #(16 * 11)]
+	ldp	x24, x25, [x0, #(16 * 12)]
+	ldp	x26, x27, [x0, #(16 * 13)]
+	ldp	x28, x29, [x0, #(16 * 14)]
+	ldr	x30,      [x0, #(16 * 15)]
+	ldp	x0,  x1,  [x0, #(16 * 0)]
+
+	eret
+	sb
+endfunc run_realm
+
+func realm_exit
+	/*
+	 * We come here with the realm's x0 and x1 on the stack and the
+	 * exit_reason in x0. See el2_vectors in
+	 * runtime/core/aarch64/vectors.S.
+	 *
+	 * First, recover the rec pointer into x1.
+	 */
+
+	/* Recover the rec pointer */
+	ldr	x1, [sp, #16]
+
+	/* Store realm GPRs (offsetof(rec, rec->regs[0]) == 0) */
+	stp	x2,  x3,  [x1, #(16 * 1)]
+	stp	x4,  x5,  [x1, #(16 * 2)]
+	stp	x6,  x7,  [x1, #(16 * 3)]
+	stp	x8,  x9,  [x1, #(16 * 4)]
+	stp	x10, x11, [x1, #(16 * 5)]
+	stp	x12, x13, [x1, #(16 * 6)]
+	stp	x14, x15, [x1, #(16 * 7)]
+	stp	x16, x17, [x1, #(16 * 8)]
+	stp	x18, x19, [x1, #(16 * 9)]
+	stp	x20, x21, [x1, #(16 * 10)]
+	stp	x22, x23, [x1, #(16 * 11)]
+	stp	x24, x25, [x1, #(16 * 12)]
+	stp	x26, x27, [x1, #(16 * 13)]
+	stp	x28, x29, [x1, #(16 * 14)]
+	str	x30,      [x1, #(16 * 15)]
+
+	/* x0 and x1 as stored by el2_vectors */
+	ldp	x2, x3,	  [sp]
+	stp	x2, x3,   [x1, #(16 * 0)]
+
+	/* Pop the saved realm x0/x1 and the rec pointer */
+	add	sp, sp, #32
+
+	/*
+	 * Restore the RMM registers from the stack
+	 * including the return address to return to
+	 * after calling run_realm().
+	 */
+	ldp	x19, x20, [sp, #(16 * 0)]
+	ldp	x21, x22, [sp, #(16 * 1)]
+	ldp	x23, x24, [sp, #(16 * 2)]
+	ldp	x25, x26, [sp, #(16 * 3)]
+	ldp	x27, x28, [sp, #(16 * 4)]
+	ldp	x29, x30, [sp, #(16 * 5)]
+	add	sp, sp, #(16 * 6)
+
+	ret
+endfunc realm_exit
diff --git a/runtime/core/aarch64/vectors.S b/runtime/core/aarch64/vectors.S
new file mode 100644
index 0000000..e5cbaf0
--- /dev/null
+++ b/runtime/core/aarch64/vectors.S
@@ -0,0 +1,107 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.section ".text"
+
+	.macro ventry_unused error_message
+	.balign	0x80
+	wfe
+	b	.-4
+	.endm
+
+	.macro ventry label
+		.balign	0x80
+		b	\label
+	.endm
+
+	// VBAR_EL2[10:0] are hardwired to 0, align the vector table address accordingly
+	.balign 0x800
+
+ENTRY(el2_vectors):
+	ventry_unused	exc_sync_sp0
+	ventry_unused	exc_irq_sp0
+	ventry_unused	exc_fiq_sp0
+	ventry_unused	exc_serror_sp0
+
+	ventry		el2_sync_cel
+	ventry_unused	exc_irq_spx
+	ventry_unused	exc_fiq_spx
+	ventry_unused	exc_serror_spx
+
+	ventry		el2_sync_lel
+	ventry		el2_irq_lel
+	ventry		el2_fiq_lel
+	ventry		el2_serror_lel
+
+	ventry_unused	exc_sync_lel_32
+	ventry_unused	exc_irq_lel_32
+	ventry_unused	exc_fiq_lel_32
+	ventry_unused	exc_serror_lel_32
+ENDPROC(el2_vectors)
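+
+/*
+ * Each lower-EL vector below stashes the realm's x0/x1 on the stack and
+ * passes the exit reason in x0, matching the layout that realm_exit (in
+ * runtime/core/aarch64/run-asm.S) expects on entry.
+ */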
+
+el2_sync_lel:
+	stp	x0, x1, [sp, #-16]!
+	mov	x0, #ARM_EXCEPTION_SYNC_LEL
+	b	realm_exit
+ENDPROC(el2_sync_lel)
+
+el2_irq_lel:
+	stp	x0, x1, [sp, #-16]!
+	mov	x0, #ARM_EXCEPTION_IRQ_LEL
+	b	realm_exit
+ENDPROC(el2_irq_lel)
+
+el2_fiq_lel:
+	stp	x0, x1, [sp, #-16]!
+	mov	x0, #ARM_EXCEPTION_FIQ_LEL
+	b	realm_exit
+ENDPROC(el2_fiq_lel)
+
+el2_serror_lel:
+	stp	x0, x1, [sp, #-16]!
+	mov	x0, #ARM_EXCEPTION_SERROR_LEL
+	b	realm_exit
+ENDPROC(el2_serror_lel)
+
+el2_sync_cel:
+	stp	x0, x1, [sp, #-16]!
+	stp	x2, x3, [sp, #-16]!
+	stp	x4, x5, [sp, #-16]!
+	stp	x6, x7, [sp, #-16]!
+	stp	x8, x9, [sp, #-16]!
+	stp	x10, x11, [sp, #-16]!
+	stp	x12, x13, [sp, #-16]!
+	stp	x14, x15, [sp, #-16]!
+	stp	x16, x17, [sp, #-16]!
+	stp	x18, xzr, [sp, #-16]!
+	stp	x29, lr, [sp, #-16]!
+
+	bl	handle_rmm_trap
+
+	/*
+	 * If it doesn't panic the RMM, handle_rmm_trap
+	 * returns the new value of the PC in x0.
+	 */
+	msr	elr_el2, x0
+
+	ldp	x29, lr, [sp], #16
+	ldp	x18, xzr, [sp], #16
+	ldp	x16, x17, [sp], #16
+	ldp	x14, x15, [sp], #16
+	ldp	x12, x13, [sp], #16
+	ldp	x10, x11, [sp], #16
+	ldp	x8, x9, [sp], #16
+	ldp	x6, x7, [sp], #16
+	ldp	x4, x5, [sp], #16
+	ldp	x2, x3, [sp], #16
+	ldp	x0, x1, [sp], #16
+
+	eret
+	sb
+
+ENDPROC(el2_sync_cel)
diff --git a/runtime/core/exit.c b/runtime/core/exit.c
new file mode 100644
index 0000000..b16fc5a
--- /dev/null
+++ b/runtime/core/exit.c
@@ -0,0 +1,759 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <attestation_token.h>
+#include <buffer.h>
+#include <esr.h>
+#include <exit.h>
+#include <fpu_helpers.h>
+#include <gic.h>
+#include <granule.h>
+#include <inject_exp.h>
+#include <memory_alloc.h>
+#include <psci.h>
+#include <realm.h>
+#include <realm_attest.h>
+#include <rec.h>
+#include <rsi-config.h>
+#include <rsi-handler.h>
+#include <rsi-host-call.h>
+#include <rsi-logger.h>
+#include <rsi-memory.h>
+#include <rsi-walk.h>
+#include <smc-rmi.h>
+#include <smc-rsi.h>
+#include <status.h>
+#include <sve.h>
+#include <sysreg_traps.h>
+#include <table.h>
+
+void save_fpu_state(struct fpu_state *fpu);
+void restore_fpu_state(struct fpu_state *fpu);
+
+static void system_abort(void)
+{
+	/*
+	 * TODO: report the abort to the EL3.
+	 * We need to establish the exact EL3 API first.
+	 */
+	assert(false);
+}
+
+static bool fixup_aarch32_data_abort(struct rec *rec, unsigned long *esr)
+{
+	unsigned long spsr = read_spsr_el2();
+
+	if ((spsr & SPSR_EL2_nRW_AARCH32) != 0UL) {
+		/*
+		 * MMIO emulation of AArch32 reads/writes is not supported.
+		 */
+		*esr &= ~ESR_EL2_ABORT_ISV_BIT;
+		return true;
+	}
+	return false;
+}
+
+static unsigned long get_dabt_write_value(struct rec *rec, unsigned long esr)
+{
+	unsigned int rt = esr_srt(esr);
+
+	/* Handle xzr */
+	if (rt == 31U) {
+		return 0UL;
+	}
+	return rec->regs[rt] & access_mask(esr);
+}
+
+/*
+ * Returns 'true' if access from @rec to @addr is within the Protected IPA space.
+ */
+static bool access_in_rec_par(struct rec *rec, unsigned long addr)
+{
+	/*
+	 * It is OK to check only the base address of the access because:
+	 * - The Protected IPA space starts at address zero.
+	 * - The IPA width is below 64 bits, therefore the access cannot
+	 *   wrap around.
+	 */
+	return addr_in_rec_par(rec, addr);
+}
+
+/*
+ * Returns 'true' if the @ipa is in PAR and its RIPAS is 'empty'.
+ *
+ * @ipa must be aligned to the granule size.
+ */
+static bool ipa_is_empty(unsigned long ipa, struct rec *rec)
+{
+	unsigned long s2tte, *ll_table;
+	struct rtt_walk wi;
+	enum ripas ripas;
+	bool ret;
+
+	assert(GRANULE_ALIGNED(ipa));
+
+	if (!addr_in_rec_par(rec, ipa)) {
+		return false;
+	}
+	granule_lock(rec->realm_info.g_rtt, GRANULE_STATE_RTT);
+
+	rtt_walk_lock_unlock(rec->realm_info.g_rtt,
+			     rec->realm_info.s2_starting_level,
+			     rec->realm_info.ipa_bits,
+			     ipa, RTT_PAGE_LEVEL, &wi);
+
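+	/* The walk returns with the last-level RTT granule (wi.g_llt) locked */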
+	ll_table = granule_map(wi.g_llt, SLOT_RTT);
+	s2tte = s2tte_read(&ll_table[wi.index]);
+
+	if (s2tte_is_destroyed(s2tte)) {
+		ret = false;
+		goto out_unmap_ll_table;
+	}
+	ripas = s2tte_get_ripas(s2tte);
+	ret = (ripas == RMI_EMPTY);
+
+out_unmap_ll_table:
+	buffer_unmap(ll_table);
+	granule_unlock(wi.g_llt);
+	return ret;
+}
+
+static bool fsc_is_external_abort(unsigned long fsc)
+{
+	if (fsc == ESR_EL2_ABORT_FSC_SEA) {
+		return true;
+	}
+
+	if ((fsc >= ESR_EL2_ABORT_FSC_SEA_TTW_START) &&
+	    (fsc <= ESR_EL2_ABORT_FSC_SEA_TTW_END)) {
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Handles Data/Instruction Aborts at a lower EL with External Abort fault
+ * status code (D/IFSC).
+ * Returns 'true' if the exception is an external abort (in which case the
+ * `rec_exit` structure is populated), 'false' otherwise.
+ */
+static bool handle_sync_external_abort(struct rec *rec,
+				       struct rmi_rec_exit *rec_exit,
+				       unsigned long esr)
+{
+	unsigned long fsc = esr & ESR_EL2_ABORT_FSC_MASK;
+	unsigned long set = esr & ESR_EL2_ABORT_SET_MASK;
+
+	if (!fsc_is_external_abort(fsc)) {
+		return false;
+	}
+
+	switch (set) {
+	case ESR_EL2_ABORT_SET_UER:
+		/*
+		 * A recoverable SEA.
+		 * Inject the synchronous abort into the Realm.
+		 * Report the exception to the host.
+		 */
+		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
+		/*
+		 * Fall through.
+		 */
+	case ESR_EL2_ABORT_SET_UEO:
+		/*
+		 * A restartable SEA.
+		 * Report the exception to the host.
+		 * The REC restarts the same instruction.
+		 */
+		rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;
+
+		/*
+		 * The value of the HPFAR_EL2 is not provided to the host as
+		 * it is undefined for external aborts.
+		 *
+		 * We also don't provide the content of FAR_EL2 because it
+		 * has no practical value to the host without the HPFAR_EL2.
+		 */
+		break;
+	case ESR_EL2_ABORT_SET_UC:
+		/*
+		 * An uncontainable SEA.
+		 * Fatal to the system.
+		 */
+		system_abort();
+		break;
+	default:
+		assert(false);
+	}
+
+	return true;
+}
+
+void emulate_stage2_data_abort(struct rec *rec,
+			       struct rmi_rec_exit *rec_exit,
+			       unsigned long rtt_level)
+{
+	unsigned long fipa = rec->regs[1];
+
+	assert(rtt_level <= RTT_PAGE_LEVEL);
+
+	/*
+	 * Set up the Exception Syndrome Register to emulate a real data
+	 * abort and return to the NS host to handle it.
+	 */
+	rec_exit->esr = (ESR_EL2_EC_DATA_ABORT |
+			(ESR_EL2_ABORT_FSC_TRANSLATION_FAULT_L0 + rtt_level));
+	rec_exit->far = 0UL;
+	rec_exit->hpfar = fipa >> HPFAR_EL2_FIPA_OFFSET;
+	rec_exit->exit_reason = RMI_EXIT_SYNC;
+}
+
+/*
+ * Returns 'true' if the abort is handled and the RMM should return to the
+ * Realm, and 'false' if the exception should be reported to the NS host.
+ */
+static bool handle_data_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
+			      unsigned long esr)
+{
+	unsigned long far = 0UL;
+	unsigned long hpfar = read_hpfar_el2();
+	unsigned long fipa = (hpfar & HPFAR_EL2_FIPA_MASK) << HPFAR_EL2_FIPA_OFFSET;
+	unsigned long write_val = 0UL;
+
+	if (handle_sync_external_abort(rec, rec_exit, esr)) {
+		/*
+		 * All external aborts are immediately reported to the host.
+		 */
+		return false;
+	}
+
+	/*
+	 * A memory access that crosses a page boundary may cause two aborts,
+	 * with `hpfar_el2` values referring to two consecutive pages.
+	 *
+	 * Insert the SEA and return to the Realm if the granule's RIPAS is EMPTY.
+	 */
+	if (ipa_is_empty(fipa, rec)) {
+		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
+		return true;
+	}
+
+	if (fixup_aarch32_data_abort(rec, &esr) ||
+	    access_in_rec_par(rec, fipa)) {
+		esr &= ESR_NONEMULATED_ABORT_MASK;
+		goto end;
+	}
+
+	if (esr_is_write(esr)) {
+		write_val = get_dabt_write_value(rec, esr);
+	}
+
+	far = read_far_el2() & ~GRANULE_MASK;
+	esr &= ESR_EMULATED_ABORT_MASK;
+
+end:
+	rec_exit->esr = esr;
+	rec_exit->far = far;
+	rec_exit->hpfar = hpfar;
+	rec_exit->gprs[0] = write_val;
+
+	return false;
+}
+
+/*
+ * Returns 'true' if the abort is handled and the RMM should return to the
+ * Realm, and 'false' if the exception should be reported to the NS host.
+ */
+static bool handle_instruction_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
+				     unsigned long esr)
+{
+	unsigned long fsc = esr & ESR_EL2_ABORT_FSC_MASK;
+	unsigned long fsc_type = fsc & ~ESR_EL2_ABORT_FSC_LEVEL_MASK;
+	unsigned long hpfar = read_hpfar_el2();
+	unsigned long fipa = (hpfar & HPFAR_EL2_FIPA_MASK) << HPFAR_EL2_FIPA_OFFSET;
+
+	if (handle_sync_external_abort(rec, rec_exit, esr)) {
+		/*
+		 * All external aborts are immediately reported to the host.
+		 */
+		return false;
+	}
+
+	/*
+	 * Insert the SEA and return to the Realm if:
+	 * - The instruction abort is at an Unprotected IPA, or
+	 * - The granule's RIPAS is EMPTY
+	 */
+	if (!access_in_rec_par(rec, fipa) || ipa_is_empty(fipa, rec)) {
+		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
+		return true;
+	}
+
+	if (fsc_type != ESR_EL2_ABORT_FSC_TRANSLATION_FAULT) {
+		unsigned long far = read_far_el2();
+
+		/*
+		 * TODO: Should this ever happen, or is it an indication of an
+		 * internal consistency failure in the RMM which should lead
+		 * to a panic instead?
+		 */
+
+		ERROR("Unhandled instruction abort:\n");
+		ERROR("    FSC: %12s0x%02lx\n", " ", fsc);
+		ERROR("    FAR: %16lx\n", far);
+		ERROR("  HPFAR: %16lx\n", hpfar);
+		return false;
+	}
+
+	rec_exit->hpfar = hpfar;
+	rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;
+
+	return false;
+}
+
+/*
+ * Return 'false' if no IRQ is pending, and 'true' if an IRQ is pending
+ * and we need to return to the host.
+ */
+static bool check_pending_irq(void)
+{
+	unsigned long pending_irq;
+
+	pending_irq = read_isr_el1();
+
+	return (pending_irq != 0UL);
+}
+
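+/*
+ * Skip over the trapped instruction: AArch64 instructions are 4 bytes
+ * wide, and for traps ELR_EL2 holds the address of the trapped
+ * instruction itself.
+ */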
+static void advance_pc(void)
+{
+	unsigned long pc = read_elr_el2();
+
+	write_elr_el2(pc + 4UL);
+}
+
+static void return_result_to_realm(struct rec *rec, struct smc_result result)
+{
+	rec->regs[0] = result.x[0];
+	rec->regs[1] = result.x[1];
+	rec->regs[2] = result.x[2];
+	rec->regs[3] = result.x[3];
+}
+
+/*
+ * Return 'true' if execution should continue in the REC, otherwise return
+ * 'false' to go back to the NS caller of REC.Enter.
+ */
+static bool handle_realm_rsi(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
+	bool ret_to_rec = true;	/* Return to Realm */
+	unsigned int function_id = rec->regs[0];
+
+	RSI_LOG_SET(rec->regs[1], rec->regs[2],
+		    rec->regs[3], rec->regs[4], rec->regs[5]);
+
+	if (!IS_SMC32_PSCI_FID(function_id) && !IS_SMC64_PSCI_FID(function_id)
+	    && !IS_SMC64_RSI_FID(function_id)) {
+
+		ERROR("Invalid RSI function_id = %x\n", function_id);
+		rec->regs[0] = SMC_UNKNOWN;
+		return true;
+	}
+
+	switch (function_id) {
+	case SMCCC_VERSION:
+		rec->regs[0] = SMCCC_VERSION_NUMBER;
+		break;
+	case SMC_RSI_ABI_VERSION:
+		rec->regs[0] = system_rsi_abi_version();
+		break;
+	case SMC32_PSCI_FID_MIN ... SMC32_PSCI_FID_MAX:
+	case SMC64_PSCI_FID_MIN ... SMC64_PSCI_FID_MAX: {
+		struct psci_result res;
+
+		res = psci_rsi(rec,
+			       function_id,
+			       rec->regs[1],
+			       rec->regs[2],
+			       rec->regs[3]);
+
+		if (!rec->psci_info.pending) {
+			rec->regs[0] = res.smc_res.x[0];
+			rec->regs[1] = res.smc_res.x[1];
+			rec->regs[2] = res.smc_res.x[2];
+			rec->regs[3] = res.smc_res.x[3];
+		}
+
+		if (res.hvc_forward.forward_psci_call) {
+			unsigned int i;
+
+			rec_exit->exit_reason = RMI_EXIT_PSCI;
+			rec_exit->gprs[0] = function_id;
+			rec_exit->gprs[1] = res.hvc_forward.x1;
+			rec_exit->gprs[2] = res.hvc_forward.x2;
+			rec_exit->gprs[3] = res.hvc_forward.x3;
+
+			for (i = 4U; i < REC_EXIT_NR_GPRS; i++) {
+				rec_exit->gprs[i] = 0UL;
+			}
+
+			advance_pc();
+			ret_to_rec = false;
+		}
+		break;
+	}
+	case SMC_RSI_ATTEST_TOKEN_INIT:
+		rec->regs[0] = handle_rsi_attest_token_init(rec);
+		break;
+	case SMC_RSI_ATTEST_TOKEN_CONTINUE: {
+		struct attest_result res;
+
+		attest_realm_token_sign_continue_start();
+		while (true) {
+			/*
+			 * Possible outcomes:
+			 *     if res.incomplete is true
+			 *         if IRQ pending
+			 *             check for pending IRQ and return to host
+			 *         else try a new iteration
+			 *     else
+			 *         if RTT table walk has failed,
+			 *             emulate data abort back to host
+			 *         otherwise
+			 *             return to realm because the token
+			 *             creation is complete or input parameter
+			 *             validation failed.
+			 */
+			handle_rsi_attest_token_continue(rec, &res);
+
+			if (res.incomplete) {
+				if (check_pending_irq()) {
+					rec_exit->exit_reason = RMI_EXIT_IRQ;
+					/* Return to NS host to handle IRQ. */
+					ret_to_rec = false;
+					break;
+				}
+			} else {
+				if (res.walk_result.abort) {
+					emulate_stage2_data_abort(
+						rec, rec_exit,
+						res.walk_result.rtt_level);
+					ret_to_rec = false; /* Exit to Host */
+					break;
+				}
+
+				/* Return to Realm */
+				return_result_to_realm(rec, res.smc_res);
+				break;
+			}
+		}
+		attest_realm_token_sign_continue_finish();
+		break;
+	}
+	case SMC_RSI_MEASUREMENT_READ:
+		rec->regs[0] = handle_rsi_read_measurement(rec);
+		break;
+	case SMC_RSI_MEASUREMENT_EXTEND:
+		rec->regs[0] = handle_rsi_extend_measurement(rec);
+		break;
+	case SMC_RSI_REALM_CONFIG: {
+		struct rsi_config_result res;
+
+		res = handle_rsi_realm_config(rec);
+		if (res.walk_result.abort) {
+			emulate_stage2_data_abort(rec, rec_exit,
+						  res.walk_result.rtt_level);
+			ret_to_rec = false; /* Exit to Host */
+		} else {
+			/* Return to Realm */
+			return_result_to_realm(rec, res.smc_res);
+		}
+		break;
+	}
+	case SMC_RSI_IPA_STATE_SET:
+		if (handle_rsi_ipa_state_set(rec, rec_exit)) {
+			rec->regs[0] = RSI_ERROR_INPUT;
+		} else {
+			advance_pc();
+			ret_to_rec = false; /* Return to Host */
+		}
+		break;
+	case SMC_RSI_IPA_STATE_GET: {
+		enum ripas ripas;
+
+		rec->regs[0] = handle_rsi_ipa_state_get(rec, rec->regs[1],
+							&ripas);
+		if (rec->regs[0] == RSI_SUCCESS) {
+			rec->regs[1] = ripas;
+		}
+		break;
+	}
+	case SMC_RSI_HOST_CALL: {
+		struct rsi_host_call_result res;
+
+		res = handle_rsi_host_call(rec, rec_exit);
+
+		if (res.walk_result.abort) {
+			emulate_stage2_data_abort(rec, rec_exit,
+						  res.walk_result.rtt_level);
+		} else {
+			rec->regs[0] = res.smc_result;
+
+			/*
+			 * In case of error, return to the Realm; the
+			 * parent function calls advance_pc().
+			 */
+			if (rec->regs[0] == RSI_SUCCESS) {
+				advance_pc();
+
+				/* Exit to Host */
+				rec->host_call = true;
+				rec_exit->exit_reason = RMI_EXIT_HOST_CALL;
+				ret_to_rec = false;
+			}
+		}
+		break;
+	}
+
+	default:
+		rec->regs[0] = SMC_UNKNOWN;
+		break;
+	}
+
+	/* Log RSI call */
+	RSI_LOG_EXIT(function_id, rec->regs[0], ret_to_rec);
+	return ret_to_rec;
+}
+
+/*
+ * Return 'true' if the RMM handled the exception,
+ * 'false' to return to the Non-secure host.
+ */
+static bool handle_exception_sync(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
+	const unsigned long esr = read_esr_el2();
+
+	switch (esr & ESR_EL2_EC_MASK) {
+	case ESR_EL2_EC_WFX:
+		rec_exit->esr = esr & (ESR_EL2_EC_MASK | ESR_EL2_WFx_TI_BIT);
+		advance_pc();
+		return false;
+	case ESR_EL2_EC_HVC:
+		realm_inject_undef_abort();
+		return true;
+	case ESR_EL2_EC_SMC:
+		if (!handle_realm_rsi(rec, rec_exit)) {
+			return false;
+		}
+		/*
+		 * Advance PC.
+		 * HCR_EL2.TSC traps execution of the SMC instruction.
+		 * It is not a routing control for the SMC exception.
+		 * Trap exceptions and SMC exceptions have different
+		 * preferred return addresses.
+		 */
+		advance_pc();
+		return true;
+	case ESR_EL2_EC_SYSREG: {
+		bool ret = handle_sysreg_access_trap(rec, rec_exit, esr);
+
+		advance_pc();
+		return ret;
+	}
+	case ESR_EL2_EC_INST_ABORT:
+		return handle_instruction_abort(rec, rec_exit, esr);
+	case ESR_EL2_EC_DATA_ABORT:
+		return handle_data_abort(rec, rec_exit, esr);
+	case ESR_EL2_EC_FPU: {
+		unsigned long cptr;
+
+		/*
+		 * The Realm has requested FPU/SIMD access. Start by
+		 * disabling the traps so that we can save the NS state
+		 * and load the Realm state.
+		 */
+		cptr = read_cptr_el2();
+		cptr &= ~(CPTR_EL2_FPEN_MASK << CPTR_EL2_FPEN_SHIFT);
+		cptr |= (CPTR_EL2_FPEN_NO_TRAP_11 << CPTR_EL2_FPEN_SHIFT);
+		cptr &= ~(CPTR_EL2_ZEN_MASK << CPTR_EL2_ZEN_SHIFT);
+		cptr |= (CPTR_EL2_ZEN_NO_TRAP_11 << CPTR_EL2_ZEN_SHIFT);
+		write_cptr_el2(cptr);
+
+		/*
+		 * Save NS state, restore realm state, and set flag indicating
+		 * realm has used FPU so we know to save and restore NS state at
+		 * realm exit.
+		 */
+		if (rec->ns->sve != NULL) {
+			save_sve_state(rec->ns->sve);
+		} else {
+			assert(rec->ns->fpu != NULL);
+			fpu_save_state(rec->ns->fpu);
+		}
+		fpu_restore_state(&rec->fpu_ctx.fpu);
+		rec->fpu_ctx.used = true;
+
+		/*
+		 * Disable SVE for now, until per-REC save/restore is
+		 * implemented.
+		 */
+		cptr = read_cptr_el2();
+		cptr &= ~(CPTR_EL2_ZEN_MASK << CPTR_EL2_ZEN_SHIFT);
+		cptr |= (CPTR_EL2_ZEN_TRAP_ALL_00 << CPTR_EL2_ZEN_SHIFT);
+		write_cptr_el2(cptr);
+
+		/*
+		 * Return 'true' indicating that this exception
+		 * has been handled and execution can continue.
+		 */
+		return true;
+	}
+	default:
+		/*
+		 * TODO: Check if there are other exit reasons we could
+		 * encounter here and handle them appropriately
+		 */
+		break;
+	}
+
+	VERBOSE("Unhandled sync exit ESR: %08lx (EC: %lx ISS: %lx)\n",
+		esr,
+		(esr & ESR_EL2_EC_MASK) >> ESR_EL2_EC_SHIFT,
+		(esr & ESR_EL2_ISS_MASK) >> ESR_EL2_ISS_SHIFT);
+
+	/*
+	 * Zero values in the esr, far & hpfar fields of the 'rec_exit'
+	 * structure will be returned to the NS host.
+	 * The only information that may leak is that there was
+	 * some unhandled/unknown reason for the exception.
+	 */
+	return false;
+}
+
+/*
+ * Return 'true' if the RMM handled the exception, 'false' to return to the
+ * Non-secure host.
+ */
+static bool handle_exception_serror_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
+	const unsigned long esr = read_esr_el2();
+
+	if (esr & ESR_EL2_SERROR_IDS_BIT) {
+		/*
+		 * Implementation-defined content of the ESR.
+		 */
+		system_abort();
+	}
+
+	if ((esr & ESR_EL2_SERROR_DFSC_MASK) != ESR_EL2_SERROR_DFSC_ASYNC) {
+		/*
+		 * Either Uncategorized or Reserved fault status code.
+		 */
+		system_abort();
+	}
+
+	switch (esr & ESR_EL2_SERROR_AET_MASK) {
+	case ESR_EL2_SERROR_AET_UEU:	/* Unrecoverable RAS Error */
+	case ESR_EL2_SERROR_AET_UER:	/* Recoverable RAS Error */
+		/*
+		 * The abort is fatal to the current S/W. Inject the SError
+		 * into the Realm so that it can, e.g., shut down gracefully
+		 * or localize the problem to the specific EL0 application.
+		 *
+		 * Note: Consider shutting down the Realm here to prevent
+		 * the host from attacking unstable Realms.
+		 */
+		inject_serror(rec, esr);
+		/*
+		 * Fall through.
+		 */
+	case ESR_EL2_SERROR_AET_CE:	/* Corrected RAS Error */
+	case ESR_EL2_SERROR_AET_UEO:	/* Restartable RAS Error */
+		/*
+		 * Report the exception to the host.
+		 */
+		rec_exit->esr = esr & ESR_SERROR_MASK;
+		break;
+	case ESR_EL2_SERROR_AET_UC:	/* Uncontainable RAS Error */
+		system_abort();
+		break;
+	default:
+		/*
+		 * Unrecognized Asynchronous Error Type
+		 */
+		assert(false);
+	}
+
+	return false;
+}
+
+static bool handle_exception_irq_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
+	(void)rec;
+
+	rec_exit->exit_reason = RMI_EXIT_IRQ;
+
+	/*
+	 * With GIC all virtual interrupt programming
+	 * must go via the NS hypervisor.
+	 */
+	return false;
+}
+
+/* Returns 'true' when returning to the Realm (S) and 'false' when returning to NS */
+bool handle_realm_exit(struct rec *rec, struct rmi_rec_exit *rec_exit, int exception)
+{
+	switch (exception) {
+	case ARM_EXCEPTION_SYNC_LEL: {
+		bool ret;
+
+		/*
+		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
+		 * information.
+		 */
+		rec_exit->exit_reason = RMI_EXIT_SYNC;
+		ret = handle_exception_sync(rec, rec_exit);
+		if (!ret) {
+			rec->last_run_info.esr = read_esr_el2();
+			rec->last_run_info.far = read_far_el2();
+			rec->last_run_info.hpfar = read_hpfar_el2();
+		}
+		return ret;
+
+		/*
+		 * TODO: Much more detailed handling of exit reasons.
+		 */
+	}
+	case ARM_EXCEPTION_IRQ_LEL:
+		return handle_exception_irq_lel(rec, rec_exit);
+	case ARM_EXCEPTION_FIQ_LEL:
+		rec_exit->exit_reason = RMI_EXIT_FIQ;
+		break;
+	case ARM_EXCEPTION_SERROR_LEL: {
+		const unsigned long esr = read_esr_el2();
+		bool ret;
+
+		/*
+		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
+		 * information.
+		 */
+		rec_exit->exit_reason = RMI_EXIT_SERROR;
+		ret = handle_exception_serror_lel(rec, rec_exit);
+		if (!ret) {
+			rec->last_run_info.esr = esr;
+			rec->last_run_info.far = read_far_el2();
+			rec->last_run_info.hpfar = read_hpfar_el2();
+		}
+		return ret;
+	}
+	default:
+		INFO("Unrecognized exit reason: %d\n", exception);
+		break;
+	}
+
+	return false;
+}
diff --git a/runtime/core/fake_host/runtime_core_stub.c b/runtime/core/fake_host/runtime_core_stub.c
new file mode 100644
index 0000000..e71ca2e
--- /dev/null
+++ b/runtime/core/fake_host/runtime_core_stub.c
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <host_harness.h>
+#include <run.h>
+
+bool memcpy_ns_read(void *dest, const void *ns_src, unsigned long size)
+{
+	return host_memcpy_ns_read(dest, ns_src, size);
+}
+
+bool memcpy_ns_write(void *ns_dest, const void *src, unsigned long size)
+{
+	return host_memcpy_ns_write(ns_dest, src, size);
+}
+
+int run_realm(unsigned long *regs)
+{
+	return host_run_realm(regs);
+}
diff --git a/runtime/core/handler.c b/runtime/core/handler.c
new file mode 100644
index 0000000..b7f3a55
--- /dev/null
+++ b/runtime/core/handler.c
@@ -0,0 +1,388 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <buffer.h>
+#include <debug.h>
+#include <sizes.h>
+#include <smc-handler.h>
+#include <smc-rmi.h>
+#include <smc.h>
+#include <status.h>
+#include <utils_def.h>
+
+#define STATUS_HANDLER(_id)[_id] = #_id
+
+const char *status_handler[] = {
+	STATUS_HANDLER(RMI_SUCCESS),
+	STATUS_HANDLER(RMI_ERROR_INPUT),
+	STATUS_HANDLER(RMI_ERROR_REALM),
+	STATUS_HANDLER(RMI_ERROR_REC),
+	STATUS_HANDLER(RMI_ERROR_RTT),
+	STATUS_HANDLER(RMI_ERROR_IN_USE)
+};
+COMPILER_ASSERT(ARRAY_LEN(status_handler) == RMI_ERROR_COUNT);
+
+/*
+ * At this level (in handle_ns_smc) we distinguish the RMI calls only by:
+ * - The number of input arguments [0..5], and
+ * - Whether the function returns output values, in addition
+ *   to the return status code.
+ * Hence, the naming syntax is:
+ * - `*_[0..5]` when no output values are returned, and
+ * - `*_[0..5]_o` when the function returns some output values.
+ */
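+
+/*
+ * For example, SMC_RMM_REC_AUX_COUNT takes one input and returns one
+ * output value besides the status code, so it is registered via
+ * HANDLER_1_O and dispatched through a 'handler_1_o' function pointer.
+ */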
+
+typedef unsigned long (*handler_0)(void);
+typedef unsigned long (*handler_1)(unsigned long arg0);
+typedef unsigned long (*handler_2)(unsigned long arg0, unsigned long arg1);
+typedef unsigned long (*handler_3)(unsigned long arg0, unsigned long arg1,
+				   unsigned long arg2);
+typedef unsigned long (*handler_4)(unsigned long arg0, unsigned long arg1,
+				   unsigned long arg2, unsigned long arg3);
+typedef unsigned long (*handler_5)(unsigned long arg0, unsigned long arg1,
+				   unsigned long arg2, unsigned long arg3,
+				   unsigned long arg4);
+typedef void (*handler_1_o)(unsigned long arg0, struct smc_result *ret);
+typedef void (*handler_3_o)(unsigned long arg0, unsigned long arg1,
+			    unsigned long arg2, struct smc_result *ret);
+
+enum rmi_type {
+	rmi_type_0,
+	rmi_type_1,
+	rmi_type_2,
+	rmi_type_3,
+	rmi_type_4,
+	rmi_type_5,
+	rmi_type_1_o,
+	rmi_type_3_o
+};
+
+struct smc_handler {
+	const char	*fn_name;
+	enum rmi_type	type;
+	union {
+		handler_0	f0;
+		handler_1	f1;
+		handler_2	f2;
+		handler_3	f3;
+		handler_4	f4;
+		handler_5	f5;
+		handler_1_o	f1_o;
+		handler_3_o	f3_o;
+		void		*fn_dummy;
+	};
+	bool		log_exec;	/* print handler execution */
+	bool		log_error;	/* print in case of error status */
+	unsigned int	out_values;	/* number of output values */
+};
+
+/*
+ * Get handler ID from FID
+ * Precondition: FID is an RMI call
+ */
+#define SMC_RMI_HANDLER_ID(_fid) SMC64_FID_OFFSET_FROM_RANGE_MIN(RMI, _fid)
+
+#define HANDLER_0(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_0, .f0 = _fn, .log_exec = _exec, .log_error = _error,	   \
+	.out_values = 0U }
+#define HANDLER_1(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_1, .f1 = _fn, .log_exec = _exec, .log_error = _error,	   \
+	.out_values = 0U }
+#define HANDLER_2(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_2, .f2 = _fn, .log_exec = _exec, .log_error = _error,     \
+	.out_values = 0U }
+#define HANDLER_3(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_3, .f3 = _fn, .log_exec = _exec, .log_error = _error,	   \
+	.out_values = 0U }
+#define HANDLER_4(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_4, .f4 = _fn, .log_exec = _exec, .log_error = _error,	   \
+	.out_values = 0U }
+#define HANDLER_5(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_5, .f5 = _fn, .log_exec = _exec, .log_error = _error,	   \
+	.out_values = 0U }
+#define HANDLER_1_O(_id, _fn, _exec, _error, _values)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_1_o, .f1_o = _fn, .log_exec = _exec, .log_error = _error, \
+	.out_values = _values }
+#define HANDLER_3_O(_id, _fn, _exec, _error, _values)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_3_o, .f3_o = _fn, .log_exec = _exec, .log_error = _error, \
+	.out_values = _values }
+
+/*
+ * The 3rd argument enables the execution log.
+ * The 4th argument enables the error log.
+ */
+static const struct smc_handler smc_handlers[] = {
+	HANDLER_0(SMC_RMM_VERSION,		 smc_version,			true,  true),
+	HANDLER_1_O(SMC_RMM_FEATURES,		 smc_read_feature_register,	true,  true, 1U),
+	HANDLER_1(SMC_RMM_GRANULE_DELEGATE,	 smc_granule_delegate,		false, true),
+	HANDLER_1(SMC_RMM_GRANULE_UNDELEGATE,	 smc_granule_undelegate,	false, true),
+	HANDLER_2(SMC_RMM_REALM_CREATE,		 smc_realm_create,		true,  true),
+	HANDLER_1(SMC_RMM_REALM_DESTROY,	 smc_realm_destroy,		true,  true),
+	HANDLER_1(SMC_RMM_REALM_ACTIVATE,	 smc_realm_activate,		true,  true),
+	HANDLER_3(SMC_RMM_REC_CREATE,		 smc_rec_create,		true,  true),
+	HANDLER_1(SMC_RMM_REC_DESTROY,		 smc_rec_destroy,		true,  true),
+	HANDLER_2(SMC_RMM_REC_ENTER,		 smc_rec_enter,			false, true),
+	HANDLER_5(SMC_RMM_DATA_CREATE,		 smc_data_create,		false, false),
+	HANDLER_3(SMC_RMM_DATA_CREATE_UNKNOWN,	 smc_data_create_unknown,	false, false),
+	HANDLER_2(SMC_RMM_DATA_DESTROY,		 smc_data_destroy,		false, true),
+	HANDLER_4(SMC_RMM_RTT_CREATE,		 smc_rtt_create,		false, true),
+	HANDLER_4(SMC_RMM_RTT_DESTROY,		 smc_rtt_destroy,		false, true),
+	HANDLER_4(SMC_RMM_RTT_FOLD,		 smc_rtt_fold,			false, true),
+	HANDLER_4(SMC_RMM_RTT_MAP_UNPROTECTED,	 smc_rtt_map_unprotected,	false, false),
+	HANDLER_3(SMC_RMM_RTT_UNMAP_UNPROTECTED, smc_rtt_unmap_unprotected,	false, false),
+	HANDLER_3_O(SMC_RMM_RTT_READ_ENTRY,	 smc_rtt_read_entry,		false, true, 4U),
+	HANDLER_2(SMC_RMM_PSCI_COMPLETE,	 smc_psci_complete,		true,  true),
+	HANDLER_1_O(SMC_RMM_REC_AUX_COUNT,	 smc_rec_aux_count,		true,  true, 1U),
+	HANDLER_3(SMC_RMM_RTT_INIT_RIPAS,	 smc_rtt_init_ripas,		false, true),
+	HANDLER_5(SMC_RMM_RTT_SET_RIPAS,	 smc_rtt_set_ripas,		false, true)
+};
+
+COMPILER_ASSERT(ARRAY_LEN(smc_handlers) == SMC64_NUM_FIDS_IN_RANGE(RMI));
+
+static bool rmi_call_log_enabled = true;
+
+static void rmi_log_on_exit(unsigned long handler_id,
+			    unsigned long arg0,
+			    unsigned long arg1,
+			    unsigned long arg2,
+			    unsigned long arg3,
+			    unsigned long arg4,
+			    struct smc_result *ret)
+{
+	const struct smc_handler *handler = &smc_handlers[handler_id];
+	unsigned long function_id = SMC64_RMI_FID(handler_id);
+	unsigned int i;
+	return_code_t rc;
+
+	if (!handler->log_exec && !handler->log_error) {
+		return;
+	}
+
+	if (function_id == SMC_RMM_VERSION) {
+		/*
+		 * RMM_VERSION is special because it returns the
+		 * version number, not the error code.
+		 */
+		INFO("%-29s %8lx %8lx %8lx %8lx %8lx > %lx\n",
+		     handler->fn_name, arg0, arg1, arg2, arg3, arg4,
+		     ret->x[0]);
+		return;
+	}
+
+	rc = unpack_return_code(ret->x[0]);
+
+	if ((handler->log_exec) ||
+	    (handler->log_error && (rc.status != RMI_SUCCESS))) {
+		INFO("%-29s %8lx %8lx %8lx %8lx %8lx > ",
+			handler->fn_name, arg0, arg1, arg2, arg3, arg4);
+		if (rc.status >= RMI_ERROR_COUNT) {
+			INFO("%lx", ret->x[0]);
+		} else {
+			INFO("%s", status_handler[rc.status]);
+		}
+
+		/* Check for index */
+		if (((function_id == SMC_RMM_REC_ENTER) &&
+		     (rc.status == RMI_ERROR_REALM)) ||
+		     (rc.status == RMI_ERROR_RTT)) {
+			INFO(" %x", rc.index);
+		}
+
+		/* Print output values */
+		for (i = 1U; i <= handler->out_values; i++) {
+			INFO(" %8lx", ret->x[i]);
+		}
+
+		INFO("\n");
+	}
+}
+
+void handle_ns_smc(unsigned long function_id,
+		   unsigned long arg0,
+		   unsigned long arg1,
+		   unsigned long arg2,
+		   unsigned long arg3,
+		   unsigned long arg4,
+		   unsigned long arg5,
+		   struct smc_result *ret)
+{
+	unsigned long handler_id;
+	const struct smc_handler *handler = NULL;
+
+	if (IS_SMC64_RMI_FID(function_id)) {
+		handler_id = SMC_RMI_HANDLER_ID(function_id);
+		if (handler_id < ARRAY_LEN(smc_handlers)) {
+			handler = &smc_handlers[handler_id];
+		}
+	}
+
+	/*
+	 * Check that the handler exists and that 'fn_dummy' is not NULL,
+	 * to catch unimplemented 'function_id' calls in the SMC RMI range.
+	 */
+	if ((handler == NULL) || (handler->fn_dummy == NULL)) {
+		VERBOSE("[%s] unknown function_id: %lx\n",
+			__func__, function_id);
+		ret->x[0] = SMC_UNKNOWN;
+		return;
+	}
+
+	assert_cpu_slots_empty();
+
+	switch (handler->type) {
+	case rmi_type_0:
+		ret->x[0] = handler->f0();
+		break;
+	case rmi_type_1:
+		ret->x[0] = handler->f1(arg0);
+		break;
+	case rmi_type_2:
+		ret->x[0] = handler->f2(arg0, arg1);
+		break;
+	case rmi_type_3:
+		ret->x[0] = handler->f3(arg0, arg1, arg2);
+		break;
+	case rmi_type_4:
+		ret->x[0] = handler->f4(arg0, arg1, arg2, arg3);
+		break;
+	case rmi_type_5:
+		ret->x[0] = handler->f5(arg0, arg1, arg2, arg3, arg4);
+		break;
+	case rmi_type_1_o:
+		handler->f1_o(arg0, ret);
+		break;
+	case rmi_type_3_o:
+		handler->f3_o(arg0, arg1, arg2, ret);
+		break;
+	default:
+		assert(false);
+	}
+
+	if (rmi_call_log_enabled) {
+		rmi_log_on_exit(handler_id, arg0, arg1, arg2, arg3, arg4, ret);
+	}
+
+	assert_cpu_slots_empty();
+}
+
+static void report_unexpected(void)
+{
+	unsigned long spsr = read_spsr_el2();
+	unsigned long esr = read_esr_el2();
+	unsigned long elr = read_elr_el2();
+	unsigned long far = read_far_el2();
+
+	INFO("----\n");
+	INFO("Unexpected exception:\n");
+	INFO("SPSR_EL2: 0x%016lx\n", spsr);
+	INFO("ESR_EL2:  0x%016lx\n", esr);
+	INFO("ELR_EL2:  0x%016lx\n", elr);
+	INFO("FAR_EL2:  0x%016lx\n", far);
+	INFO("----\n");
+}
+
+unsigned long handle_realm_trap(unsigned long *regs)
+{
+	(void)regs;
+
+	report_unexpected();
+
+	while (1) {
+		wfe();
+	}
+}
+
+/*
+ * Identifies an abort that the RMM may recover from.
+ */
+struct rmm_trap_element {
+	/*
+	 * The PC at the time of abort.
+	 */
+	unsigned long aborted_pc;
+	/*
+	 * New value of the PC.
+	 */
+	unsigned long new_pc;
+};
+
+#define RMM_TRAP_HANDLER(_aborted_pc, _new_pc) \
+	{ .aborted_pc = (unsigned long)(&_aborted_pc), \
+	  .new_pc = (unsigned long)(&_new_pc) }
+
+/*
+ * The registered locations of load/store instructions that access NS memory.
+ */
+extern void *ns_read;
+extern void *ns_write;
+
+/*
+ * The new value of the PC when the GPF occurs on a registered location.
+ */
+extern void *ns_access_ret_0;
+
+struct rmm_trap_element rmm_trap_list[] = {
+	RMM_TRAP_HANDLER(ns_read, ns_access_ret_0),
+	RMM_TRAP_HANDLER(ns_write, ns_access_ret_0),
+};
+#define RMM_TRAP_LIST_SIZE (sizeof(rmm_trap_list)/sizeof(struct rmm_trap_element))
+
+static void fatal_abort(void)
+{
+	report_unexpected();
+
+	while (1) {
+		wfe();
+	}
+}
+
+static bool is_el2_data_abort_gpf(unsigned long esr)
+{
+	if (((esr & ESR_EL2_EC_MASK) == ESR_EL2_EC_DATA_ABORT_SEL) &&
+	    ((esr & ESR_EL2_ABORT_FSC_MASK) == ESR_EL2_ABORT_FSC_GPF))
+		return true;
+	return false;
+}
+
+/*
+ * Handles the RMM's aborts.
+ * It compares the PC at the time of the abort with the registered addresses.
+ * If it finds a match, it returns the new value of the PC that the RMM should
+ * continue from. Other register values are preserved.
+ * If no match is found, it aborts the RMM.
+ */
+unsigned long handle_rmm_trap(void)
+{
+	unsigned long i;
+
+	unsigned long esr = read_esr_el2();
+	unsigned long elr = read_elr_el2();
+
+	/*
+	 * Only the GPF data aborts are recoverable.
+	 */
+	if (!is_el2_data_abort_gpf(esr)) {
+		fatal_abort();
+	}
+
+	for (i = 0; i < RMM_TRAP_LIST_SIZE; i++) {
+		if (rmm_trap_list[i].aborted_pc == elr) {
+			return rmm_trap_list[i].new_pc;
+		}
+	}
+
+	fatal_abort();
+	return 0;
+}
diff --git a/runtime/core/init.c b/runtime/core/init.c
new file mode 100644
index 0000000..ad86fbc
--- /dev/null
+++ b/runtime/core/init.c
@@ -0,0 +1,88 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch_helpers.h>
+#include <attestation.h>
+#include <buffer.h>
+#include <debug.h>
+#include <rmm_el3_ifc.h>
+#include <smc-rmi.h>
+#include <smc-rsi.h>
+
+#ifdef NDEBUG
+#define RMM_BUILD_TYPE	"release"
+#else
+#define RMM_BUILD_TYPE	"debug"
+#endif
+
+#define VER_STRING(toolchain, major, minor, patch) \
+		toolchain __STRING(major) "." \
+		__STRING(minor) "." __STRING(patch)
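+
+/*
+ * Because the major/minor/patch arguments are macro-expanded before
+ * __STRING() stringifies them, e.g. VER_STRING("GCC ", __GNUC__,
+ * __GNUC_MINOR__, __GNUC_PATCHLEVEL__) with GCC 12.2.0 expands to the
+ * string literal "GCC 12.2.0".
+ */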
+
+static void rmm_arch_init(void)
+{
+	MPAM(write_mpam2_el2(MPAM2_EL2_INIT));
+	MPAM(write_mpamhcr_el2(MPAMHCR_EL2_INIT));
+	SPE(write_pmscr_el2(PMSCR_EL2_INIT));
+
+	write_cnthctl_el2(CNTHCTL_EL2_INIT);
+	write_mdcr_el2(MDCR_EL2_INIT);
+}
+
+void rmm_warmboot_main(void)
+{
+	/*
+	 * Do the rest of RMM architecture init
+	 */
+	rmm_arch_init();
+
+	/*
+	 * Finish initializing the slot buffer mechanism
+	 */
+	slot_buf_init();
+}
+
+void rmm_main(void)
+{
+	unsigned int rmm_el3_ifc_version = rmm_el3_ifc_get_version();
+	unsigned int manifest_version = rmm_el3_ifc_get_manifest_version();
+
+	/*
+	 * Report project name, version, build type and
+	 * commit information if it is present
+	 */
+	NOTICE("Booting %s v.%s(%s) %s Built with %s\n",
+		NAME, VERSION, RMM_BUILD_TYPE, COMMIT_INFO,
+#ifdef __clang__
+	VER_STRING("Clang ", __clang_major__, __clang_minor__,
+		__clang_patchlevel__)
+#else
+	VER_STRING("GCC ", __GNUC__, __GNUC_MINOR__,
+		__GNUC_PATCHLEVEL__)
+#endif
+		);
+
+	/* Report Boot Interface version */
+	NOTICE("RMM-EL3 Interface v.%u.%u\n",
+		RMM_EL3_IFC_GET_VERS_MAJOR(rmm_el3_ifc_version),
+		RMM_EL3_IFC_GET_VERS_MINOR(rmm_el3_ifc_version));
+
+	/* Report Boot Manifest version */
+	NOTICE("Boot Manifest Interface v.%u.%u\n",
+		RMM_EL3_MANIFEST_GET_VERS_MAJOR(manifest_version),
+		RMM_EL3_MANIFEST_GET_VERS_MINOR(manifest_version));
+
+	/* Report RMI/RSI ABI versions and build timestamp */
+	NOTICE("RMI/RSI ABI v.%u.%u/%u.%u built: %s %s\n",
+		RMI_ABI_VERSION_MAJOR, RMI_ABI_VERSION_MINOR,
+		RSI_ABI_VERSION_MAJOR, RSI_ABI_VERSION_MINOR,
+		__DATE__, __TIME__);
+
+	rmm_warmboot_main();
+
+	if (attestation_init() != 0) {
+		WARN("Attestation init failed.\n");
+	}
+}
diff --git a/runtime/core/inject_exp.c b/runtime/core/inject_exp.c
new file mode 100644
index 0000000..cc818f8
--- /dev/null
+++ b/runtime/core/inject_exp.c
@@ -0,0 +1,169 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <inject_exp.h>
+#include <rec.h>
+
+/*
+ * Calculate the address of the vector entry when an exception is inserted
+ * into the Realm.
+ *
+ * @vbar The base address of the vector table in the Realm.
+ * @spsr The Saved Program Status Register at EL2.
+ */
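+/*
+ * The VBAR_* offsets used below follow the AArch64 vector table layout:
+ * 0x000 for current EL with SP_EL0, 0x200 for current EL with SP_ELx,
+ * 0x400 for a lower EL using AArch64 and 0x600 for a lower EL using
+ * AArch32.
+ */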
+static unsigned long calc_vector_entry(unsigned long vbar, unsigned long spsr)
+{
+	unsigned long offset;
+
+	if ((spsr & MASK(SPSR_EL2_MODE)) == SPSR_EL2_MODE_EL1h) {
+		offset = VBAR_CEL_SP_ELx_OFFSET;
+	} else if ((spsr & MASK(SPSR_EL2_MODE)) == SPSR_EL2_MODE_EL1t) {
+		offset = VBAR_CEL_SP_EL0_OFFSET;
+	} else if ((spsr & MASK(SPSR_EL2_MODE)) == SPSR_EL2_MODE_EL0t) {
+		if ((spsr & MASK(SPSR_EL2_nRW)) == SPSR_EL2_nRW_AARCH64) {
+			offset = VBAR_LEL_AA64_OFFSET;
+		} else {
+			offset = VBAR_LEL_AA32_OFFSET;
+		}
+	} else {
+		assert(false);
+		offset = 0UL;
+	}
+
+	return vbar + offset;
+}
+
+/*
+ * Calculate the value of the pstate when an exception
+ * is inserted into the Realm.
+ */
+static unsigned long calc_pstate(void)
+{
+	/*
+	 * The pstate is EL1, AArch64, SPSel = SP_ELx and:
+	 * DAIF = '1111b'
+	 * NZCV = '0000b'
+	 * TODO: setup TCO, DIT, UAO, PAN, SSBS, BTYPE
+	 */
+	unsigned long pstate = SPSR_EL2_MODE_EL1h |
+			       SPSR_EL2_nRW_AARCH64 |
+			       SPSR_EL2_F_BIT |
+			       SPSR_EL2_I_BIT |
+			       SPSR_EL2_A_BIT |
+			       SPSR_EL2_D_BIT;
+	return pstate;
+}
+
+/*
+ * Calculate the content of the Realm's esr_el1 register when
+ * the Synchronous Instruction or Data Abort is injected into
+ * the Realm (EL1).
+ *
+ * The value is constructed from the @esr_el2 & @spsr_el2 that
+ * are captured when the exception from the Realm was taken to EL2.
+ *
+ * The fault status code (ESR_EL1.I/DFSC) is set to @fsc
+ */
+static unsigned long calc_esr_idabort(unsigned long esr_el2,
+				      unsigned long spsr_el2,
+				      unsigned long fsc)
+{
+	/*
+	 * Copy esr_el2 into esr_el1 apart from the following fields:
+	 * - The exception class (EC). Its value depends on whether the
+	 *   exception to EL2 was taken from EL1 or EL0.
+	 * - I/DFSC. It will be set to @fsc.
+	 * - FnV. It will be set to zero.
+	 * - S1PTW. It will be set to zero.
+	 */
+	unsigned long esr_el1 = esr_el2 & ~(ESR_EL2_EC_MASK  |
+					    ESR_EL2_ABORT_FSC_MASK |
+					    ESR_EL2_ABORT_FNV_BIT |
+					    ESR_EL2_ABORT_S1PTW_BIT);
+
+	unsigned long ec = esr_el2 & ESR_EL2_EC_MASK;
+
+	assert((ec == ESR_EL2_EC_INST_ABORT) || (ec == ESR_EL2_EC_DATA_ABORT));
+	if ((spsr_el2 & MASK(SPSR_EL2_MODE)) != SPSR_EL2_MODE_EL0t) {
+		ec += 1UL << ESR_EL2_EC_SHIFT;
+	}
+	esr_el1 |= ec;
+
+	/*
+	 * Set the I/DFSC.
+	 */
+	assert((fsc & ~ESR_EL2_ABORT_FSC_MASK) == 0UL);
+	esr_el1 |= fsc;
+
+	/*
+	 * Set the EA.
+	 */
+	esr_el1 |= ESR_EL2_ABORT_EA_BIT;
+
+	return esr_el1;
+}
+
+/*
+ * Inject the Synchronous Instruction or Data Abort into the current REC.
+ * The I/DFSC field in the ESR_EL1 is set to @fsc
+ */
+void inject_sync_idabort(unsigned long fsc)
+{
+	unsigned long esr_el2 = read_esr_el2();
+	unsigned long far_el2 = read_far_el2();
+	unsigned long elr_el2 = read_elr_el2();
+	unsigned long spsr_el2 = read_spsr_el2();
+	unsigned long vbar_el1 = read_vbar_el12();
+
+	unsigned long esr_el1 = calc_esr_idabort(esr_el2, spsr_el2, fsc);
+	unsigned long pc = calc_vector_entry(vbar_el1, spsr_el2);
+	unsigned long pstate = calc_pstate();
+
+	write_far_el12(far_el2);
+	write_elr_el12(elr_el2);
+	write_spsr_el12(spsr_el2);
+	write_esr_el12(esr_el1);
+	write_elr_el2(pc);
+	write_spsr_el2(pstate);
+}
+
+/*
+ * Inject the Synchronous Instruction or Data Abort into @rec.
+ * The I/DFSC field in the ESR_EL1 is set to @fsc
+ */
+void inject_sync_idabort_rec(struct rec *rec, unsigned long fsc)
+{
+	rec->sysregs.far_el1 = rec->last_run_info.far;
+	rec->sysregs.elr_el1 = rec->pc;
+	rec->sysregs.spsr_el1 = rec->pstate;
+	rec->sysregs.esr_el1 = calc_esr_idabort(rec->last_run_info.esr,
+						rec->pstate, fsc);
+	rec->pc = calc_vector_entry(rec->sysregs.vbar_el1, rec->pstate);
+	rec->pstate = calc_pstate();
+}
+
+/*
+ * Inject the Undefined Synchronous Exception into the current REC.
+ */
+void realm_inject_undef_abort(void)
+{
+	unsigned long esr = ESR_EL2_IL_MASK | ESR_EL2_EC_UNKNOWN;
+	unsigned long elr = read_elr_el2();
+	unsigned long spsr = read_spsr_el2();
+	unsigned long vbar = read_vbar_el12();
+
+	unsigned long pc = calc_vector_entry(vbar, spsr);
+	unsigned long pstate = calc_pstate();
+
+	write_elr_el12(elr);
+	write_spsr_el12(spsr);
+	write_esr_el12(esr);
+
+	write_elr_el2(pc);
+	write_spsr_el2(pstate);
+}
diff --git a/runtime/core/run.c b/runtime/core/run.c
new file mode 100644
index 0000000..9127072
--- /dev/null
+++ b/runtime/core/run.c
@@ -0,0 +1,357 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_features.h>
+#include <attestation.h>
+#include <buffer.h>
+#include <cpuid.h>
+#include <exit.h>
+#include <fpu_helpers.h>
+#include <rec.h>
+#include <run.h>
+#include <smc-rmi.h>
+#include <sve.h>
+#include <timers.h>
+
+static struct ns_state g_ns_data[MAX_CPUS];
+static uint8_t g_sve_data[MAX_CPUS][sizeof(struct sve_state)]
+		__attribute__((aligned(sizeof(__uint128_t))));
+
+/*
+ * Initialize the aux data and any buffer pointers into the aux granule
+ * memory for use by the REC when it is entered.
+ */
+static void init_aux_data(struct rec_aux_data *aux_data,
+			  void *rec_aux,
+			  unsigned int num_rec_aux)
+{
+	aux_data->attest_heap_buf = (uint8_t *)rec_aux;
+
+	/* Ensure we have enough aux granules for use by REC */
+	assert(num_rec_aux >= REC_HEAP_PAGES);
+}
+
+/*
+ * The parent REC granule's lock is expected to be held before
+ * map_rec_aux() and unmap_rec_aux() are called.
+ */
+static void *map_rec_aux(struct granule *rec_aux_pages[], unsigned long num_aux)
+{
+	void *rec_aux = NULL;
+
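+	/*
+	 * The aux granules are mapped into consecutive REC_AUX buffer
+	 * slots, forming one virtually contiguous region that starts at
+	 * the address of the first mapping; unmap_rec_aux() relies on
+	 * this layout.
+	 */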
+	for (unsigned long i = 0UL; i < num_aux; i++) {
+		void *aux = granule_map(rec_aux_pages[i], SLOT_REC_AUX0 + i);
+
+		if (i == 0UL) {
+			rec_aux = aux;
+		}
+	}
+	return rec_aux;
+}
+
+static void unmap_rec_aux(void *rec_aux, unsigned long num_aux)
+{
+	unsigned char *rec_aux_vaddr = (unsigned char *)rec_aux;
+
+	for (unsigned long i = 0UL; i < num_aux; i++) {
+		buffer_unmap(rec_aux_vaddr + i * GRANULE_SIZE);
+	}
+}
+
+static void save_sysreg_state(struct sysreg_state *sysregs)
+{
+	sysregs->sp_el0 = read_sp_el0();
+	sysregs->sp_el1 = read_sp_el1();
+	sysregs->elr_el1 = read_elr_el12();
+	sysregs->spsr_el1 = read_spsr_el12();
+	sysregs->pmcr_el0 = read_pmcr_el0();
+	sysregs->pmuserenr_el0 = read_pmuserenr_el0();
+	sysregs->tpidrro_el0 = read_tpidrro_el0();
+	sysregs->tpidr_el0 = read_tpidr_el0();
+	sysregs->csselr_el1 = read_csselr_el1();
+	sysregs->sctlr_el1 = read_sctlr_el12();
+	sysregs->actlr_el1 = read_actlr_el1();
+	sysregs->cpacr_el1 = read_cpacr_el12();
+	sysregs->ttbr0_el1 = read_ttbr0_el12();
+	sysregs->ttbr1_el1 = read_ttbr1_el12();
+	sysregs->tcr_el1 = read_tcr_el12();
+	sysregs->esr_el1 = read_esr_el12();
+	sysregs->afsr0_el1 = read_afsr0_el12();
+	sysregs->afsr1_el1 = read_afsr1_el12();
+	sysregs->far_el1 = read_far_el12();
+	sysregs->mair_el1 = read_mair_el12();
+	sysregs->vbar_el1 = read_vbar_el12();
+
+	sysregs->contextidr_el1 = read_contextidr_el12();
+	sysregs->tpidr_el1 = read_tpidr_el1();
+	sysregs->amair_el1 = read_amair_el12();
+	sysregs->cntkctl_el1 = read_cntkctl_el12();
+	sysregs->par_el1 = read_par_el1();
+	sysregs->mdscr_el1 = read_mdscr_el1();
+	sysregs->mdccint_el1 = read_mdccint_el1();
+	sysregs->disr_el1 = read_disr_el1();
+	MPAM(sysregs->mpam0_el1 = read_mpam0_el1();)
+
+	/* Timer registers */
+	sysregs->cntpoff_el2 = read_cntpoff_el2();
+	sysregs->cntvoff_el2 = read_cntvoff_el2();
+	sysregs->cntp_ctl_el0 = read_cntp_ctl_el02();
+	sysregs->cntp_cval_el0 = read_cntp_cval_el02();
+	sysregs->cntv_ctl_el0 = read_cntv_ctl_el02();
+	sysregs->cntv_cval_el0 = read_cntv_cval_el02();
+}
+
+static void save_realm_state(struct rec *rec)
+{
+	save_sysreg_state(&rec->sysregs);
+
+	rec->pc = read_elr_el2();
+	rec->pstate = read_spsr_el2();
+
+	gic_save_state(&rec->sysregs.gicstate);
+}
+
+static void restore_sysreg_state(struct sysreg_state *sysregs)
+{
+	write_sp_el0(sysregs->sp_el0);
+	write_sp_el1(sysregs->sp_el1);
+	write_elr_el12(sysregs->elr_el1);
+	write_spsr_el12(sysregs->spsr_el1);
+	write_pmcr_el0(sysregs->pmcr_el0);
+	write_pmuserenr_el0(sysregs->pmuserenr_el0);
+	write_tpidrro_el0(sysregs->tpidrro_el0);
+	write_tpidr_el0(sysregs->tpidr_el0);
+	write_csselr_el1(sysregs->csselr_el1);
+	write_sctlr_el12(sysregs->sctlr_el1);
+	write_actlr_el1(sysregs->actlr_el1);
+	write_cpacr_el12(sysregs->cpacr_el1);
+	write_ttbr0_el12(sysregs->ttbr0_el1);
+	write_ttbr1_el12(sysregs->ttbr1_el1);
+	write_tcr_el12(sysregs->tcr_el1);
+	write_esr_el12(sysregs->esr_el1);
+	write_afsr0_el12(sysregs->afsr0_el1);
+	write_afsr1_el12(sysregs->afsr1_el1);
+	write_far_el12(sysregs->far_el1);
+	write_mair_el12(sysregs->mair_el1);
+	write_vbar_el12(sysregs->vbar_el1);
+
+	write_contextidr_el12(sysregs->contextidr_el1);
+	write_tpidr_el1(sysregs->tpidr_el1);
+	write_amair_el12(sysregs->amair_el1);
+	write_cntkctl_el12(sysregs->cntkctl_el1);
+	write_par_el1(sysregs->par_el1);
+	write_mdscr_el1(sysregs->mdscr_el1);
+	write_mdccint_el1(sysregs->mdccint_el1);
+	write_disr_el1(sysregs->disr_el1);
+	MPAM(write_mpam0_el1(sysregs->mpam0_el1);)
+	write_vmpidr_el2(sysregs->vmpidr_el2);
+
+	/* Timer registers */
+	write_cntpoff_el2(sysregs->cntpoff_el2);
+	write_cntvoff_el2(sysregs->cntvoff_el2);
+
+	/*
+	 * Restore CNTx_CVAL registers before CNTx_CTL to avoid
+	 * raising the interrupt signal briefly before lowering
+	 * it again due to some expired CVAL left in the timer
+	 * register.
+	 */
+	write_cntp_cval_el02(sysregs->cntp_cval_el0);
+	write_cntp_ctl_el02(sysregs->cntp_ctl_el0);
+	write_cntv_cval_el02(sysregs->cntv_cval_el0);
+	write_cntv_ctl_el02(sysregs->cntv_ctl_el0);
+}
+
+static void restore_realm_state(struct rec *rec)
+{
+	/*
+	 * Restore this early to give time to the timer mask to propagate to
+	 * the GIC.  Issue an ISB to ensure the register write is actually
+	 * performed before doing the remaining work.
+	 */
+	write_cnthctl_el2(rec->sysregs.cnthctl_el2);
+	isb();
+
+	restore_sysreg_state(&rec->sysregs);
+	write_elr_el2(rec->pc);
+	write_spsr_el2(rec->pstate);
+	write_hcr_el2(rec->sysregs.hcr_el2);
+
+	gic_restore_state(&rec->sysregs.gicstate);
+}
+
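+/*
+ * Program the stage 2 translation controls (VTCR_EL2 and VTTBR_EL2)
+ * from the Realm's common sysregs before entering the REC.
+ */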
+static void configure_realm_stage2(struct rec *rec)
+{
+	write_vtcr_el2(rec->common_sysregs.vtcr_el2);
+	write_vttbr_el2(rec->common_sysregs.vttbr_el2);
+}
+
+static void save_ns_state(struct ns_state *ns_state)
+{
+	save_sysreg_state(&ns_state->sysregs);
+
+	/*
+	 * CNTHCTL_EL2 is saved/restored separately from the main system
+	 * registers because the Realm's configuration is written on every
+	 * entry to the Realm; see `check_pending_timers()`.
+	 */
+	ns_state->sysregs.cnthctl_el2 = read_cnthctl_el2();
+
+	ns_state->icc_sre_el2 = read_icc_sre_el2();
+}
+
+static void restore_ns_state(struct ns_state *ns_state)
+{
+	restore_sysreg_state(&ns_state->sysregs);
+
+	/*
+	 * CNTHCTL_EL2 is saved/restored separately from the main system
+	 * registers because the Realm's configuration is written on every
+	 * entry to the Realm; see `check_pending_timers()`.
+	 */
+	write_cnthctl_el2(ns_state->sysregs.cnthctl_el2);
+
+	write_icc_sre_el2(ns_state->icc_sre_el2);
+}
+
+static void activate_events(struct rec *rec)
+{
+	/*
+	 * The only event that can currently be injected into the Realm is
+	 * an SError.
+	 */
+	if (rec->serror_info.inject) {
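+		/*
+		 * VSESR_EL2 holds the syndrome that the Realm will observe,
+		 * and setting HCR_EL2.VSE pends a virtual SError for it.
+		 */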
+		write_vsesr_el2(rec->serror_info.vsesr_el2);
+		write_hcr_el2(rec->sysregs.hcr_el2 | HCR_VSE);
+		rec->serror_info.inject = false;
+	}
+}
+
+void inject_serror(struct rec *rec, unsigned long vsesr)
+{
+	rec->serror_info.vsesr_el2 = vsesr;
+	rec->serror_info.inject = true;
+}
+
+void rec_run_loop(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
+	struct ns_state *ns_state;
+	int realm_exception_code;
+	void *rec_aux;
+	unsigned int cpuid = my_cpuid();
+
+	assert(rec->ns == NULL);
+
+	assert(cpuid < MAX_CPUS);
+	ns_state = &g_ns_data[cpuid];
+
+	/* Ensure the SVE/FPU context is clear */
+	assert(ns_state->sve == NULL);
+	assert(ns_state->fpu == NULL);
+
+	/* Map auxiliary granules */
+	rec_aux = map_rec_aux(rec->g_aux, rec->num_rec_aux);
+
+	init_aux_data(&(rec->aux_data), rec_aux, rec->num_rec_aux);
+
+	/*
+	 * The attestation heap on the REC aux pages is mapped now. It is
+	 * time to associate it with the current CPU. This heap will be
+	 * used for attestation RSI calls while the REC is running.
+	 */
+	attestation_heap_ctx_assign_pe(&rec->alloc_info.ctx);
+
+	/*
+	 * Initialise the heap for attestation if necessary.
+	 */
+	if (!rec->alloc_info.ctx_initialised) {
+		(void)attestation_heap_ctx_init(rec->aux_data.attest_heap_buf,
+						REC_HEAP_PAGES * SZ_4K);
+		rec->alloc_info.ctx_initialised = true;
+	}
+
+	if (is_feat_sve_present()) {
+		ns_state->sve = (struct sve_state *)&g_sve_data[cpuid];
+	} else {
+		ns_state->fpu = (struct fpu_state *)&g_sve_data[cpuid];
+	}
+
+	save_ns_state(ns_state);
+	restore_realm_state(rec);
+
+	/* Prepare for lazy save/restore of FPU/SIMD registers. */
+	rec->ns = ns_state;
+	assert(rec->fpu_ctx.used == false);
+
+	configure_realm_stage2(rec);
+
+	do {
+		/*
+		 * We must check the status of the arch timers in every
+		 * iteration of the loop to ensure we update the timer
+		 * mask on each entry to the Realm and that we report any
+		 * change in output level to the NS caller.
+		 */
+		if (check_pending_timers(rec)) {
+			rec_exit->exit_reason = RMI_EXIT_IRQ;
+			break;
+		}
+
+		activate_events(rec);
+		realm_exception_code = run_realm(&rec->regs[0]);
+	} while (handle_realm_exit(rec, rec_exit, realm_exception_code));
+
+	/*
+	 * Check if FPU/SIMD was used, and if it was, save the realm state,
+	 * restore the NS state, and reenable traps in CPTR_EL2.
+	 */
+	if (rec->fpu_ctx.used) {
+		unsigned long cptr;
+
+		cptr = read_cptr_el2();
+		cptr &= ~(CPTR_EL2_ZEN_MASK << CPTR_EL2_ZEN_SHIFT);
+		cptr |= (CPTR_EL2_ZEN_NO_TRAP_11 << CPTR_EL2_ZEN_SHIFT);
+		write_cptr_el2(cptr);
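+		/*
+		 * With ZEN set to no-trap, the NS SVE state can be restored
+		 * below without faulting.
+		 */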
+
+		fpu_save_state(&rec->fpu_ctx.fpu);
+		if (ns_state->sve != NULL) {
+			restore_sve_state(ns_state->sve);
+		} else {
+			assert(ns_state->fpu != NULL);
+			fpu_restore_state(ns_state->fpu);
+		}
+
+		cptr = read_cptr_el2();
+		cptr &= ~(CPTR_EL2_FPEN_MASK << CPTR_EL2_FPEN_SHIFT);
+		cptr |= (CPTR_EL2_FPEN_TRAP_ALL_00 << CPTR_EL2_FPEN_SHIFT);
+		cptr &= ~(CPTR_EL2_ZEN_MASK << CPTR_EL2_ZEN_SHIFT);
+		cptr |= (CPTR_EL2_ZEN_TRAP_ALL_00 << CPTR_EL2_ZEN_SHIFT);
+		write_cptr_el2(cptr);
+		rec->fpu_ctx.used = false;
+	}
+
+	/*
+	 * Clear FPU/SVE context while exiting
+	 */
+	ns_state->sve = NULL;
+	ns_state->fpu = NULL;
+
+	/*
+	 * Clear NS pointer since that struct is local to this function.
+	 */
+	rec->ns = NULL;
+
+	report_timer_state_to_ns(rec_exit);
+
+	save_realm_state(rec);
+	restore_ns_state(ns_state);
+
+	/* Undo the heap association */
+	attestation_heap_ctx_unassign_pe(&rec->alloc_info.ctx);
+	/* Unmap auxiliary granules */
+	unmap_rec_aux(rec_aux, rec->num_rec_aux);
+}
diff --git a/runtime/core/sysregs.c b/runtime/core/sysregs.c
new file mode 100644
index 0000000..55fac96
--- /dev/null
+++ b/runtime/core/sysregs.c
@@ -0,0 +1,222 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <esr.h>
+#include <memory_alloc.h>
+#include <rec.h>
+#include <smc-rmi.h>
+
+#define SYSREG_READ_CASE(reg) \
+	case ESR_EL2_SYSREG_##reg: return read_##reg()
+
+static unsigned long read_idreg(unsigned int idreg)
+{
+	switch (idreg) {
+	SYSREG_READ_CASE(ID_AA64PFR0_EL1);
+	SYSREG_READ_CASE(ID_AA64PFR1_EL1);
+	/*
+	 * TODO: not supported without SVE:
+	 * SYSREG_READ_CASE(ID_AA64ZFR0_EL1);
+	 */
+	SYSREG_READ_CASE(ID_AA64DFR0_EL1);
+	SYSREG_READ_CASE(ID_AA64DFR1_EL1);
+	SYSREG_READ_CASE(ID_AA64AFR0_EL1);
+	SYSREG_READ_CASE(ID_AA64AFR1_EL1);
+	SYSREG_READ_CASE(ID_AA64ISAR0_EL1);
+	SYSREG_READ_CASE(ID_AA64ISAR1_EL1);
+	SYSREG_READ_CASE(ID_AA64MMFR0_EL1);
+	SYSREG_READ_CASE(ID_AA64MMFR1_EL1);
+	SYSREG_READ_CASE(ID_AA64MMFR2_EL1);
+
+	default:
+		/* All other encodings are in the RES0 space */
+		return 0UL;
+	}
+}
+
+/*
+ * Handle ID_AA64XXX<n>_EL1 instructions
+ */
+static bool handle_id_sysreg_trap(struct rec *rec,
+				  struct rmi_rec_exit *rec_exit,
+				  unsigned long esr)
+{
+	unsigned int rt;
+	unsigned long idreg, mask;
+
+	/*
+	 * We only set HCR_EL2.TID3 to trap ID registers at the moment, and
+	 * that traps only reads. Seeing a write here indicates a consistency
+	 * problem within the RMM, so we should panic immediately.
+	 */
+	assert(!ESR_EL2_SYSREG_IS_WRITE(esr));
+
+	/*
+	 * Read the Rt field from the trapped instruction; it identifies
+	 * the general-purpose register used for the transfer.
+	 */
+	rt = ESR_EL2_SYSREG_ISS_RT(esr);
+
+	/* If the destination register is XZR, the read result is discarded */
+	if (rt == 31U) {
+		return true;
+	}
+
+	idreg = esr & ESR_EL2_SYSREG_MASK;
+
+	if (idreg == ESR_EL2_SYSREG_ID_AA64ISAR1_EL1) {
+		/* Clear Address and Generic Authentication bits */
+		mask = (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_APA_SHIFT) |
+		       (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_API_SHIFT) |
+		       (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_GPA_SHIFT) |
+		       (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_GPI_SHIFT);
+	/*
+	 * Workaround for TF-A trapping AMU register accesses
+	 * to EL3 in Realm state.
+	 */
+	} else if (idreg == ESR_EL2_SYSREG_ID_AA64PFR0_EL1) {
+		/* Clear support for Activity Monitors Extension */
+		mask = MASK(ID_AA64PFR0_EL1_AMU);
+
+		/*
+		 * Clear support for SVE. This is a temporary fix until RMM
+		 * completely supports SVE.
+		 */
+		mask |= MASK(ID_AA64PFR0_EL1_SVE);
+	} else {
+		mask = 0UL;
+	}
+
+	ARRAY_WRITE(rec->regs, rt, read_idreg(idreg) & ~mask);
+
+	return true;
+}
+
+static bool handle_icc_el1_sysreg_trap(struct rec *rec,
+				       struct rmi_rec_exit *rec_exit,
+				       unsigned long esr)
+{
+	__unused unsigned long sysreg = esr & ESR_EL2_SYSREG_MASK;
+
+	/*
+	 * We should only have configured ICH_HCR_EL2 to trap on DIR, and
+	 * the SGI registers always trap as mandated by the architecture,
+	 * so make sure we are not accidentally trapping on some other
+	 * register here.
+	 */
+	assert((sysreg == ESR_EL2_SYSREG_ICC_DIR) ||
+	       (sysreg == ESR_EL2_SYSREG_ICC_SGI1R_EL1) ||
+	       (sysreg == ESR_EL2_SYSREG_ICC_SGI0R_EL1));
+
+	/*
+	 * The registers above should only trap to EL2 for writes; read
+	 * instructions are not defined for them and should cause an
+	 * Undefined exception at EL1.
+	 */
+	assert(ESR_EL2_SYSREG_IS_WRITE(esr));
+
+	rec_exit->exit_reason = RMI_EXIT_SYNC;
+	rec_exit->esr = esr;
+	return false;
+}
+
+typedef bool (*sysreg_handler_fn)(struct rec *rec, struct rmi_rec_exit *rec_exit,
+				  unsigned long esr);
+
+struct sysreg_handler {
+	unsigned long esr_mask;
+	unsigned long esr_value;
+	sysreg_handler_fn fn;
+};
+
+#define SYSREG_HANDLER(_mask, _value, _handler_fn) \
+	{ .esr_mask = (_mask), .esr_value = (_value), .fn = _handler_fn }
+
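+/*
+ * Handlers are matched in order: the first entry whose mask/value pair
+ * matches the trapped ESR is invoked, so more specific encodings must be
+ * listed before more general ones.
+ */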
+static const struct sysreg_handler sysreg_handlers[] = {
+	SYSREG_HANDLER(ESR_EL2_SYSREG_ID_MASK, ESR_EL2_SYSREG_ID, handle_id_sysreg_trap),
+	SYSREG_HANDLER(ESR_EL2_SYSREG_ICC_EL1_MASK, ESR_EL2_SYSREG_ICC_EL1, handle_icc_el1_sysreg_trap),
+	SYSREG_HANDLER(ESR_EL2_SYSREG_MASK, ESR_EL2_SYSREG_ICC_PMR_EL1, handle_icc_el1_sysreg_trap)
+};
+
+static unsigned long get_sysreg_write_value(struct rec *rec, unsigned long esr)
+{
+	unsigned int rt = esr_sysreg_rt(esr);
+	unsigned long val;
+
+	/* Reads from XZR always yield zero */
+	if (rt == 31U) {
+		return 0UL;
+	}
+
+	ARRAY_READ(rec->regs, rt, val);
+	return val;
+}
+
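+/*
+ * For a system register write that is forwarded to the NS host, pass the
+ * value being written in the first exit GPR so the host can emulate the
+ * access.
+ */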
+static void emulate_sysreg_access_ns(struct rec *rec, struct rmi_rec_exit *rec_exit,
+				     unsigned long esr)
+{
+	if (ESR_EL2_SYSREG_IS_WRITE(esr)) {
+		rec_exit->gprs[0] = get_sysreg_write_value(rec, esr);
+	}
+}
+
+/*
+ * Handle trapped MSR, MRS or System instruction execution
+ * in AArch64 state
+ */
+bool handle_sysreg_access_trap(struct rec *rec, struct rmi_rec_exit *rec_exit,
+			       unsigned long esr)
+{
+	/*
+	 * Read the Rt field from the trapped instruction; it identifies
+	 * the general-purpose register used for the transfer.
+	 */
+	unsigned int rt = ESR_EL2_SYSREG_ISS_RT(esr);
+	unsigned int i;
+	unsigned int __unused op0, op1, crn, crm, op2;
+	unsigned long __unused sysreg;
+
+	/* The trapped instruction must be a 32-bit one (ESR_EL2.IL == 1) */
+	assert(ESR_IL(esr) != 0UL);
+
+	for (i = 0U; i < ARRAY_LEN(sysreg_handlers); i++) {
+		const struct sysreg_handler *handler = &sysreg_handlers[i];
+		bool handled;
+
+		if ((esr & handler->esr_mask) == handler->esr_value) {
+			handled = handler->fn(rec, rec_exit, esr);
+			if (!handled) {
+				emulate_sysreg_access_ns(rec, rec_exit, esr);
+			}
+			return handled;
+		}
+	}
+
+	/*
+	 * For now, treat all unhandled accesses as RAZ/WI: reads return
+	 * zero (unless the destination is XZR) and writes are ignored.
+	 */
+	if (!ESR_EL2_SYSREG_IS_WRITE(esr) && (rt != 31U)) {
+		ARRAY_WRITE(rec->regs, rt, 0UL);
+	}
+
+	sysreg = esr & ESR_EL2_SYSREG_MASK;
+
+	/* Extract the system register encoding */
+	op0 = EXTRACT(ESR_EL2_SYSREG_TRAP_OP0, sysreg);
+	op1 = EXTRACT(ESR_EL2_SYSREG_TRAP_OP1, sysreg);
+	crn = EXTRACT(ESR_EL2_SYSREG_TRAP_CRN, sysreg);
+	crm = EXTRACT(ESR_EL2_SYSREG_TRAP_CRM, sysreg);
+	op2 = EXTRACT(ESR_EL2_SYSREG_TRAP_OP2, sysreg);
+
+	INFO("Unhandled %s S%u_%u_C%u_C%u_%u\n",
+		ESR_EL2_SYSREG_IS_WRITE(esr) ? "write" : "read",
+		op0, op1, crn, crm, op2);
+
+	return true;
+}
diff --git a/runtime/core/vmid.c b/runtime/core/vmid.c
new file mode 100644
index 0000000..64c30b3
--- /dev/null
+++ b/runtime/core/vmid.c
@@ -0,0 +1,65 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch_features.h>
+#include <assert.h>
+#include <atomics.h>
+#include <sizes.h>
+#include <spinlock.h>
+#include <vmid.h>
+
+#define VMID8_COUNT		(1U << 8)
+#define VMID16_COUNT		(1U << 16)
+#define MAX_VMID_COUNT		VMID16_COUNT
+#define VMID_ARRAY_LONG_SIZE	(MAX_VMID_COUNT / BITS_PER_UL)
+
+/*
+ * The bitmap for the reserved/used VMID values.
+ */
+static unsigned long vmids[VMID_ARRAY_LONG_SIZE];
+
+/*
+ * Marks the VMID value as in use. It returns:
+ * - True, on success.
+ * - False, if the VMID is out of range or was already reserved (in use).
+ */
+bool vmid_reserve(unsigned int vmid)
+{
+	unsigned int offset;
+	unsigned int vmid_count;
+
+	/* Number of supported VMID values */
+	vmid_count = is_feat_vmid16_present() ? VMID16_COUNT : VMID8_COUNT;
+
+	/*
+	 * The VMID input from NS as part of RMI_REALM_CREATE is of 'short
+	 * int' type, so this check cannot fail on systems with FEAT_VMID16
+	 * implemented.
+	 */
+	if (vmid >= vmid_count) {
+		return false;
+	}
+
+	offset = vmid / BITS_PER_UL;
+	/* Reduce vmid to its bit position within the selected word */
+	vmid %= BITS_PER_UL;
+
+	return !atomic_bit_set_acquire_release_64(&vmids[offset], vmid);
+}
+
+/*
+ * Marks the VMID value as no longer in use.
+ */
+void vmid_free(unsigned int vmid)
+{
+	unsigned int offset;
+	unsigned int __unused vmid_count;
+
+	/* Number of supported VMID values */
+	vmid_count = is_feat_vmid16_present() ? VMID16_COUNT : VMID8_COUNT;
+
+	/* Check that the VMID is within the supported range */
+	assert(vmid < vmid_count);
+
+	offset = vmid / BITS_PER_UL;
+	/* Reduce vmid to its bit position within the selected word */
+	vmid %= BITS_PER_UL;
+
+	atomic_bit_clear_release_64(&vmids[offset], vmid);
+}