TF-RMM Release v0.1.0

This is the first external release of TF-RMM and provides a reference
implementation of the Realm Management Monitor (RMM) as specified by the
RMM Beta0 specification [1].

`docs/readme.rst` has more details about the project, and
`docs/getting_started/getting-started.rst` describes how to get
started with TF-RMM.

[1] https://developer.arm.com/documentation/den0137/1-0bet0/?lang=en

Signed-off-by: Soby Mathew <soby.mathew@arm.com>
Change-Id: I205ef14c015e4a37ae9ae1a64e4cd22eb8da746e
diff --git a/runtime/CMakeLists.txt b/runtime/CMakeLists.txt
new file mode 100644
index 0000000..4c7fbfa
--- /dev/null
+++ b/runtime/CMakeLists.txt
@@ -0,0 +1,108 @@
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+#
+
+include(ArmTargetLinkerScript)
+
+add_executable(rmm-runtime)
+
+target_link_options(rmm-runtime
+    PRIVATE "-Wl,-Map=$<TARGET_FILE:rmm-runtime>.map")
+
+arm_config_option(
+    NAME RSI_LOG_LEVEL
+    HELP "Log level to apply for RSI calls (0 - 50)"
+    TYPE STRING
+    DEFAULT 40)
+
+target_compile_definitions(rmm-runtime
+    PRIVATE "RSI_LOG_LEVEL=${RSI_LOG_LEVEL}")
+
+arm_config_option(
+    NAME RMM_NUM_PAGES_PER_STACK
+    HELP "Number of pages to use per CPU stack"
+    TYPE STRING
+    DEFAULT 3
+    ADVANCED)
+
+target_compile_definitions(rmm-runtime
+    PRIVATE "RMM_NUM_PAGES_PER_STACK=${RMM_NUM_PAGES_PER_STACK}")
+
+target_link_libraries(rmm-runtime
+    PRIVATE rmm-lib
+            rmm-platform)
+
+target_include_directories(rmm-runtime
+    PRIVATE "include")
+
+if(NOT RMM_ARCH STREQUAL fake_host)
+    target_sources(rmm-runtime
+        PRIVATE "core/aarch64/entry.S"
+                "core/aarch64/head.S"
+                "core/aarch64/helpers.S"
+                "core/aarch64/ns_access.S"
+                "core/aarch64/run-asm.S"
+                "core/aarch64/vectors.S")
+else()
+    target_sources(rmm-runtime
+        PRIVATE "core/fake_host/runtime_core_stub.c")
+endif()
+
+target_sources(rmm-runtime
+    PRIVATE "core/exit.c"
+            "core/handler.c"
+            "core/init.c"
+            "core/inject_exp.c"
+            "core/run.c"
+            "core/sysregs.c"
+            "core/vmid.c")
+
+target_sources(rmm-runtime
+    PRIVATE "rmi/feature.c"
+            "rmi/granule.c"
+            "rmi/realm.c"
+            "rmi/rec.c"
+            "rmi/rtt.c"
+            "rmi/run.c"
+            "rmi/system.c")
+
+target_sources(rmm-runtime
+    PRIVATE "rsi/config.c"
+            "rsi/host_call.c"
+            "rsi/logger.c"
+            "rsi/memory.c"
+            "rsi/psci.c"
+            "rsi/realm_ipa_helper.c"
+            "rsi/realm_attest.c"
+            "rsi/system.c")
+
+arm_config_option(
+    NAME RMM_MAX_SIZE
+    HELP "Maximum size for RMM image"
+    TYPE STRING
+    DEFAULT 0x0
+    ADVANCED)
+
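+# RMM_MAX_SIZE must be supplied by the platform build configuration;
+# the 0x0 default only exists to catch a missing setting.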
+if(RMM_MAX_SIZE EQUAL 0x0)
+    message(FATAL_ERROR "RMM_MAX_SIZE is not initialized")
+endif()
+
+if(NOT RMM_ARCH STREQUAL fake_host)
+    arm_target_linker_script(rmm-runtime "linker.lds")
+
+    set_target_properties(rmm-runtime-lds
+        PROPERTIES COMPILE_DEFINITIONS "__LINKER__")
+
+    set_property(TARGET rmm-runtime-lds APPEND
+        PROPERTY COMPILE_DEFINITIONS "GRANULE_SIZE=UL(${GRANULE_SIZE})")
+
+    set_property(TARGET rmm-runtime-lds APPEND
+        PROPERTY COMPILE_DEFINITIONS "MAX_CPUS=UL(${MAX_CPUS})")
+
+    set_property(TARGET rmm-runtime-lds APPEND
+        PROPERTY COMPILE_DEFINITIONS "RMM_MAX_SIZE=UL(${RMM_MAX_SIZE})")
+
+    set_property(TARGET rmm-runtime-lds APPEND
+        PROPERTY COMPILE_DEFINITIONS "RMM_NUM_PAGES_PER_STACK=UL(${RMM_NUM_PAGES_PER_STACK})")
+endif()
diff --git a/runtime/core/aarch64/entry.S b/runtime/core/aarch64/entry.S
new file mode 100644
index 0000000..5b557d6
--- /dev/null
+++ b/runtime/core/aarch64/entry.S
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <asm_macros.S>
+#include <smc.h>
+
+.globl rmm_handler
+
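+/*
+ * Main SMC handling loop of the RMM: service one NS SMC request,
+ * return its result to EL3 via SMC_RMM_REQ_COMPLETE, then wait for
+ * the next request.
+ */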
+func rmm_handler
+	/*
+	 * Save the Link Register and X4. As per SMCCC v1.2, the value
+	 * of X4 must be preserved unless it contains a result, as
+	 * specified in the function definition.
+	 */
+	stp	x4, lr, [sp, #-16]!
+
+	/*
+	 * Zero the space for X0-X3 in the smc_result structure
+	 * and pass its address as the last argument.
+	 */
+	stp	xzr, xzr, [sp, #-16]!
+	stp	xzr, xzr, [sp, #-16]!
+	mov	x7, sp
+
+	bl	handle_ns_smc
+
+	/*
+	 * Copy command output values back to caller. Since this is
+	 * done through SMC, X0 is used as the FID, and X1-X5 contain
+	 * the values of X0-X4 copied from the smc_result structure.
+	 */
+	ldr	x0, =SMC_RMM_REQ_COMPLETE
+	ldp	x1, x2, [sp], #16
+	ldp	x3, x4, [sp], #16
+	ldp	x5, lr, [sp], #16
+
+	smc	#0
+
+	/* Continue the rmm handling loop */
+	b	rmm_handler
+endfunc rmm_handler
diff --git a/runtime/core/aarch64/head.S b/runtime/core/aarch64/head.S
new file mode 100644
index 0000000..16d3dac
--- /dev/null
+++ b/runtime/core/aarch64/head.S
@@ -0,0 +1,175 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <rmm_el3_ifc.h>
+#include <sizes.h>
+#include <smc.h>
+#include <xlat_tables.h>
+
+#define RMM_STACK_SIZE		(SZ_4K * RMM_NUM_PAGES_PER_STACK)
+
+.globl rmm_entry
+
+/*
+ * Initialize essential R-EL2 sysregs and C runtime environment
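+ *
+ * \_vector            - vector table to install in VBAR_EL2
+ * \_is_cold_boot_flag - label of the cold boot flag
+ * \_warm_boot         - label to branch to on the warm boot path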
+ */
+.macro rmm_el2_init_env _vector, _is_cold_boot_flag, _warm_boot
+
+	/*
+	 * Stash arguments from previous boot stage
+	 */
+	mov	x20, x0
+	mov	x21, x1
+	mov	x22, x2
+	mov	x23, x3
+
+	mov_imm	x1, SCTLR_EL2_INIT
+	msr	sctlr_el2, x1
+
+	mov_imm	x2, HCR_EL2_INIT
+	msr	hcr_el2, x2
+
+	mov_imm	x3, CPTR_EL2_INIT
+	msr	cptr_el2, x3
+
+	mov_imm	x4, ICC_SRE_EL2_INIT
+	msr	ICC_SRE_EL2, x4
+
+	isb
+
+	ldr	x1, \_is_cold_boot_flag
+	cbz	x1, 1f
+
+	/*
+	 * As PIE is enabled, fix up the Global Offset Table and dynamic
+	 * relocations only once during cold boot. This is needed before
+	 * accessing any symbol addresses.
+	 */
+	bl	fixup_gdt_reloc
+
+	/* Cold and warm boot need to go through this path */
+1:
+	/* Early validate and init CPU Id */
+	mov	x0, x20
+	bl	rmm_el3_ifc_validate_cpuid
+
+	/* Setup stack on this CPU. X0 already contains the CPU Id */
+	bl	rmm_get_my_stack
+	mov	sp, x0
+
+	/*
+	 * Setup exception vectors
+	 */
+	adrp	x3, \_vector
+	add	x3, x3, :lo12:\_vector
+	msr	vbar_el2, x3
+	isb
+
+	/*
+	 * Find out whether this is a cold or warm boot
+	 */
+	ldr	x1, \_is_cold_boot_flag
+	cbnz	x1, 2f
+
+	/*
+	 * Restore arguments in preparation for the warm boot path
+	 */
+	mov	x0, x20
+	mov	x1, x21
+	mov	x2, x22
+	mov	x3, x23
+	b	\_warm_boot
+
+2:
+	/*
+	 * Update cold boot flag to indicate cold boot is done
+	 */
+	adr	x2, \_is_cold_boot_flag
+	str	xzr, [x2]
+
+	/*
+	 * Initialize BSS section
+	 */
+	adrp	x0, bss_start
+	add	x0, x0, :lo12:bss_start
+	adrp	x1, bss_end
+	add	x1, x1, :lo12:bss_end
+	sub	x2, x1, x0
+	mov	x1, xzr
+	bl	memset
+
+	/*
+	 * Restore args received from previous BL image
+	 */
+	mov	x0, x20
+	mov	x1, x21
+	mov	x2, x22
+	mov	x3, x23
+.endm
+
+/*
+ * This is the main entry point for both primary and secondary PEs.
+ */
+func rmm_entry
+
+	rmm_el2_init_env el2_vectors, cold_boot_flag, skip_to_warmboot
+
+	/*
+	 * Initialize platform specific peripherals like UART and
+	 * xlat tables.
+	 */
+	bl	plat_setup
+	bl	xlat_enable_mmu_el2
+
+	bl	rmm_main
+	b	smc_ret
+
+skip_to_warmboot:
+	/*
+	 * Carry on with the rest of the RMM warmboot path
+	 */
+	bl	plat_warmboot_setup
+	bl	xlat_enable_mmu_el2
+
+	bl	rmm_warmboot_main
+smc_ret:
+	mov_imm	x0, SMC_RMM_BOOT_COMPLETE
+	mov_imm	x1, E_RMM_BOOT_SUCCESS
+	smc	#0
+
+	/* Jump to the SMC handler post-init */
+	b	rmm_handler
+
+	/*
+	 * Flag to mark whether this is a cold boot.
+	 * 1: cold boot, 0: warm boot.
+	 */
+.align 3
+cold_boot_flag:
+	.dword		1
+endfunc rmm_entry
+
+/*
+ * Return the stack top for a given PE index, passed in x0.
+ *
+ * stack_start                          stack_end
+ *       o--sz---o....o--sz---o--sz---o--sz---o
+ *       ^\_____/^....^\_____/^\_____/^\_____/^
+ * id = (MAX_CPUS-1)     2       1       0
+ *
+ * Arg: x0 - CPU position
+ * sz : RMM_STACK_SIZE bytes
+ */
+func rmm_get_my_stack
+#ifndef NDEBUG
+	cmp	x0, #MAX_CPUS
+	ASM_ASSERT lo
+#endif
+	adrp	x1, stack_end
+	add	x1, x1, :lo12:stack_end
+	mov	x2, #(RMM_STACK_SIZE)	/* stack size per CPU */
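+	/* x0 = stack_end - (cpu_id * stack_size) */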
+	umsubl	x0, w0, w2, x1
+	ret
+endfunc rmm_get_my_stack
diff --git a/runtime/core/aarch64/helpers.S b/runtime/core/aarch64/helpers.S
new file mode 100644
index 0000000..55bfc97
--- /dev/null
+++ b/runtime/core/aarch64/helpers.S
@@ -0,0 +1,127 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <xlat_defs.h>
+
+	.globl	fixup_gdt_reloc
+
+/* ---------------------------------------------------------------------------
+ * Helper to fix up the Global Offset Table (GOT) and dynamic relocations
+ * (.rela.dyn) at runtime.
+ *
+ * This function is meant to be used when the firmware is compiled with -fpie
+ * and linked with -pie options. We rely on the linker script exporting
+ * appropriate markers for the start and end of each section. For the Global
+ * Offset Table (GOT), we expect the 'rmm_got_start' and 'rmm_got_end'
+ * symbols to be defined. Similarly for .rela.dyn, we expect the
+ * 'rmm_rela_start' and 'rmm_rela_end' symbols to be defined. We also expect
+ * the 'rmm_base' and 'rmm_end' symbols to be defined by the linker script
+ * and to be 4KB aligned. The RMM should be statically linked to start at
+ * 0x0.
+ *
+ * Clobber list: x0 to x7.
+ * ---------------------------------------------------------------------------
+ */
+
+/* Relocation codes */
+#define	R_AARCH64_NONE		0
+#define	R_AARCH64_RELATIVE	1027
+
+func fixup_gdt_reloc
+	/* Lower Limit for fixup */
+	mov	x0, xzr
+	/* rmm_base and rmm_end are 4KB aligned hence adrp is enough */
+	adrp	x2, rmm_base
+	adrp	x1, rmm_end
+	/* Upper Limit for fixup (rmm_end - rmm_base) */
+	sub	x1, x1, x2
+
+	/*
+	 * Since RMM will be compiled to start at 0x0, the current
+	 * PC-relative `rmm_base` loaded in x2 will be the Diff(S)
+	 * to be applied to the fixups.
+	 */
+	cbz	x2, 4f	/* Diff(S) = 0. No relocation needed */
+
+	adrp	x6, rmm_got_start
+	add	x6, x6, :lo12:rmm_got_start
+	adrp	x7, rmm_got_end
+	add	x7, x7, :lo12:rmm_got_end
+
+	/*
+	 * The GOT is an array of 64-bit addresses which must be fixed up
+	 * as new_addr = old_addr + Diff(S).
+	 * new_addr is the address the binary is currently executing from,
+	 * and old_addr is the address at link time.
+	 */
+1:	ldr	x3, [x6]
+	/* Skip adding offset if address is < lower limit */
+	cmp	x3, x0
+	b.lo	2f
+
+	/* Skip adding offset if address is > upper limit */
+	cmp	x3, x1
+	b.hi	2f
+	add	x3, x3, x2
+	str	x3, [x6]
+
+2:	add	x6, x6, #8
+	cmp	x6, x7
+	b.lo	1b
+
+	/* Starting dynamic relocations */
+3:	adrp	x6, rmm_rela_start
+	add	x6, x6, :lo12:rmm_rela_start
+	adrp	x7, rmm_rela_end
+	add	x7, x7, :lo12:rmm_rela_end
+
+	/*
+	 * According to the ELF-64 specification, the RELA data structure
+	 * is as follows:
+	 *	typedef struct {
+	 *		Elf64_Addr r_offset;
+	 *		Elf64_Xword r_info;
+	 *		Elf64_Sxword r_addend;
+	 *	} Elf64_Rela;
+	 *
+	 * r_offset is the address of the reference.
+	 * r_info is the symbol index and the type of relocation (in this
+	 * case code 1027, which corresponds to R_AARCH64_RELATIVE).
+	 * r_addend is the constant part of the expression.
+	 *
+	 * The size of the Elf64_Rela structure is 24 bytes.
+	 */
+
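+	/*
+	 * For each R_AARCH64_RELATIVE entry, the fixup applied below is:
+	 *	*(r_offset + Diff(S)) = r_addend + Diff(S)
+	 */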
+1:	ldr	x3, [x6, #8]	/* r_info */
+	/* Skip R_AARCH64_NONE entry with code 0 */
+	cbz	x3, 2f
+
+#ifndef NDEBUG
+	/* Assert that the relocation type is R_AARCH64_RELATIVE */
+	cmp	x3, #R_AARCH64_RELATIVE
+	ASM_ASSERT eq
+#endif
+	ldr	x4, [x6, #16]	/* r_addend */
+
+	/* Skip adding offset if r_addend is < lower limit */
+	cmp	x4, x0
+	b.lo	2f
+
+	/* Skip adding offset if r_addend entry is > upper limit */
+	cmp	x4, x1
+	b.hi	2f
+
+	ldr	x3, [x6]	/* r_offset */
+	add	x4, x4, x2	/* Diff(S) + r_addend */
+	str	x4, [x3, x2]
+
+2:	add	x6, x6, #24
+	cmp	x6, x7
+	b.lo	1b
+
+4:
+	ret
+endfunc fixup_gdt_reloc
diff --git a/runtime/core/aarch64/ns_access.S b/runtime/core/aarch64/ns_access.S
new file mode 100644
index 0000000..b39d2ec
--- /dev/null
+++ b/runtime/core/aarch64/ns_access.S
@@ -0,0 +1,79 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <asm_macros.S>
+
+.section ".text"
+
+/*
+ * The following addresses are registered with the exception handler:
+ */
+.global ns_read
+.global ns_write
+
+.global memcpy_ns_read
+.global memcpy_ns_write
+.global ns_access_ret_0
+
+/*
+ * Copy data from NS into Realm memory.
+ * The function returns 1 if the copy succeeds.
+ * If the access to the NS memory generates a GPF, the exception handler
+ * returns to ns_access_ret_0 and 0 is returned to the caller.
+ * In case of failure (when 0 is returned), partial data may have been
+ * written to the destination buffer.
+ *
+ * x0 - The address of the buffer in Realm memory to write into.
+ * x1 - The address of the buffer in NS memory to read from.
+ * x2 - The number of bytes to read.
+ * All arguments must be aligned to 8 bytes.
+ */
+func memcpy_ns_read
+	cbz	x2, 2f
+	mov	x3, #0
+1:
+ns_read:
+	ldr	x4, [x1], #8
+	str	x4, [x0], #8
+	add	x3, x3, #8
+	cmp	x3, x2
+	bne	1b
+2:
+	mov	x0, #1
+	ret
+endfunc memcpy_ns_read
+
+/*
+ * Copy data from Realm into NS memory.
+ * The function returns 1 if the copy succeeds.
+ * If the access to the NS memory generates a GPF, the exception handler
+ * returns to ns_access_ret_0 and 0 is returned to the caller.
+ * In case of failure (when 0 is returned), partial data may have been
+ * written to the destination buffer.
+ *
+ * x0 - The address of the buffer in NS memory to write into.
+ * x1 - The address of the buffer in Realm memory to read from.
+ * x2 - The number of bytes to write.
+ * All arguments must be aligned to 8 bytes.
+ */
+func memcpy_ns_write
+	cbz	x2, 2f
+	mov	x3, #0
+1:
+	ldr	x4, [x1], #8
+ns_write:
+	str	x4, [x0], #8
+	add	x3, x3, #8
+	cmp	x3, x2
+	bne	1b
+2:
+	mov	x0, #1
+	ret
+endfunc memcpy_ns_write
+
+func ns_access_ret_0
+	mov	x0, #0
+	ret
+endfunc ns_access_ret_0
diff --git a/runtime/core/aarch64/run-asm.S b/runtime/core/aarch64/run-asm.S
new file mode 100644
index 0000000..7024b0e
--- /dev/null
+++ b/runtime/core/aarch64/run-asm.S
@@ -0,0 +1,103 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <asm_macros.S>
+#include <rec.h>
+#include <sve.h>
+
+.globl run_realm
+.globl realm_exit
+
+/*
+ * int run_realm(unsigned long *regs);
+ *
+ * Per the AAPCS we must preserve x19-x29, along with the SP. We may freely
+ * corrupt x0-x18 and the flags, but need the LR to return to our caller.
+ */
+func run_realm
+	/* Push RMM registers to the stack */
+	sub	sp, sp, #(16 * 6)
+	stp	x19, x20, [sp, #(16 * 0)]
+	stp	x21, x22, [sp, #(16 * 1)]
+	stp	x23, x24, [sp, #(16 * 2)]
+	stp	x25, x26, [sp, #(16 * 3)]
+	stp	x27, x28, [sp, #(16 * 4)]
+	stp	x29, x30, [sp, #(16 * 5)]
+
+	/* Push rec pointer to the stack for realm_exit */
+	stp	x0, xzr, [sp, #-16]!
+
+	/* load realm GPRs (offsetof(rec, rec->regs[0]) == 0) */
+	ldp	x2,  x3,  [x0, #(16 * 1)]
+	ldp	x4,  x5,  [x0, #(16 * 2)]
+	ldp	x6,  x7,  [x0, #(16 * 3)]
+	ldp	x8,  x9,  [x0, #(16 * 4)]
+	ldp	x10, x11, [x0, #(16 * 5)]
+	ldp	x12, x13, [x0, #(16 * 6)]
+	ldp	x14, x15, [x0, #(16 * 7)]
+	ldp	x16, x17, [x0, #(16 * 8)]
+	ldp	x18, x19, [x0, #(16 * 9)]
+	ldp	x20, x21, [x0, #(16 * 10)]
+	ldp	x22, x23, [x0, #(16 * 11)]
+	ldp	x24, x25, [x0, #(16 * 12)]
+	ldp	x26, x27, [x0, #(16 * 13)]
+	ldp	x28, x29, [x0, #(16 * 14)]
+	ldr	x30,      [x0, #(16 * 15)]
+	ldp	x0,  x1,  [x0, #(16 * 0)]
+
+	eret
+	sb
+endfunc run_realm
+
+func realm_exit
+	/*
+	 * We come here with the realm's x0 and x1 on the stack and the
+	 * exit_reason in x0. See el2_vectors in
+	 * runtime/core/aarch64/vectors.S.
+	 *
+	 * First, recover the rec pointer into x1.
+	 */
+
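+	/*
+	 * Stack layout at this point (descending):
+	 *   [sp, #0]  : realm's x0 and x1, pushed by el2_vectors
+	 *   [sp, #16] : rec pointer and padding, pushed by run_realm
+	 */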
+	/* Recover the rec pointer */
+	ldr	x1, [sp, #16]
+
+	/* Store realm GPRs (offsetof(rec, rec->regs[0]) == 0) */
+	stp	x2,  x3,  [x1, #(16 * 1)]
+	stp	x4,  x5,  [x1, #(16 * 2)]
+	stp	x6,  x7,  [x1, #(16 * 3)]
+	stp	x8,  x9,  [x1, #(16 * 4)]
+	stp	x10, x11, [x1, #(16 * 5)]
+	stp	x12, x13, [x1, #(16 * 6)]
+	stp	x14, x15, [x1, #(16 * 7)]
+	stp	x16, x17, [x1, #(16 * 8)]
+	stp	x18, x19, [x1, #(16 * 9)]
+	stp	x20, x21, [x1, #(16 * 10)]
+	stp	x22, x23, [x1, #(16 * 11)]
+	stp	x24, x25, [x1, #(16 * 12)]
+	stp	x26, x27, [x1, #(16 * 13)]
+	stp	x28, x29, [x1, #(16 * 14)]
+	str	x30,      [x1, #(16 * 15)]
+
+	/* x0 and x1 as stored by el2_vectors */
+	ldp	x2, x3,	  [sp]
+	stp	x2, x3,   [x1, #(16 * 0)]
+
+	/* Move sp to the realm regs */
+	add	sp, sp, #32
+
+	/*
+	 * Restore the RMM registers from the stack
+	 * including the return address to return to
+	 * after calling run_realm().
+	 */
+	ldp	x19, x20, [sp, #(16 * 0)]
+	ldp	x21, x22, [sp, #(16 * 1)]
+	ldp	x23, x24, [sp, #(16 * 2)]
+	ldp	x25, x26, [sp, #(16 * 3)]
+	ldp	x27, x28, [sp, #(16 * 4)]
+	ldp	x29, x30, [sp, #(16 * 5)]
+	add	sp, sp, #(16 * 6)
+
+	ret
+endfunc realm_exit
diff --git a/runtime/core/aarch64/vectors.S b/runtime/core/aarch64/vectors.S
new file mode 100644
index 0000000..e5cbaf0
--- /dev/null
+++ b/runtime/core/aarch64/vectors.S
@@ -0,0 +1,107 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.section ".text"
+
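+	/*
+	 * Unused vector entries park the PE in a WFE loop. The
+	 * \error_message argument is unused and kept for documentation.
+	 */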
+	.macro ventry_unused error_message
+	.balign	0x80
+	wfe
+	b	.-4
+	.endm
+
+	.macro ventry label
+		.balign	0x80
+		b	\label
+	.endm
+
+	/* VBAR_EL2[10:0] are hardwired to 0, align vector address accordingly */
+	.balign 0x800
+
+ENTRY(el2_vectors):
+	ventry_unused	exc_sync_sp0
+	ventry_unused	exc_irq_sp0
+	ventry_unused	exc_fiq_sp0
+	ventry_unused	exc_serror_sp0
+
+	ventry		el2_sync_cel
+	ventry_unused	exc_irq_spx
+	ventry_unused	exc_fiq_spx
+	ventry_unused	exc_serror_spx
+
+	ventry		el2_sync_lel
+	ventry		el2_irq_lel
+	ventry		el2_fiq_lel
+	ventry		el2_serror_lel
+
+	ventry_unused	exc_sync_lel_32
+	ventry_unused	exc_irq_lel_32
+	ventry_unused	exc_fiq_lel_32
+	ventry_unused	exc_serror_lel_32
+ENDPROC(el2_vectors)
+
+el2_sync_lel:
+	stp	x0, x1, [sp, #-16]!
+	mov	x0, #ARM_EXCEPTION_SYNC_LEL
+	b	realm_exit
+ENDPROC(el2_sync_lel)
+
+el2_irq_lel:
+	stp	x0, x1, [sp, #-16]!
+	mov	x0, #ARM_EXCEPTION_IRQ_LEL
+	b	realm_exit
+ENDPROC(el2_irq_lel)
+
+el2_fiq_lel:
+	stp	x0, x1, [sp, #-16]!
+	mov	x0, #ARM_EXCEPTION_FIQ_LEL
+	b	realm_exit
+ENDPROC(el2_fiq_lel)
+
+el2_serror_lel:
+	stp	x0, x1, [sp, #-16]!
+	mov	x0, #ARM_EXCEPTION_SERROR_LEL
+	b	realm_exit
+ENDPROC(el2_serror_lel)
+
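+/*
+ * Synchronous exception taken from the RMM itself (current EL with SPx).
+ * Save the caller-saved registers, let handle_rmm_trap() decide whether
+ * the abort is recoverable, and resume at the PC it returns.
+ */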
+el2_sync_cel:
+	stp	x0, x1, [sp, #-16]!
+	stp	x2, x3, [sp, #-16]!
+	stp	x4, x5, [sp, #-16]!
+	stp	x6, x7, [sp, #-16]!
+	stp	x8, x9, [sp, #-16]!
+	stp	x10, x11, [sp, #-16]!
+	stp	x12, x13, [sp, #-16]!
+	stp	x14, x15, [sp, #-16]!
+	stp	x16, x17, [sp, #-16]!
+	stp	x18, xzr, [sp, #-16]!
+	stp	x29, lr, [sp, #-16]!
+
+	bl	handle_rmm_trap
+
+	/*
+	 * If it doesn't panic the RMM, handle_rmm_trap
+	 * returns the new value of PC in x0.
+	 */
+	msr	elr_el2, x0
+
+	ldp	x29, lr, [sp], #16
+	ldp	x18, xzr, [sp], #16
+	ldp	x16, x17, [sp], #16
+	ldp	x14, x15, [sp], #16
+	ldp	x12, x13, [sp], #16
+	ldp	x10, x11, [sp], #16
+	ldp	x8, x9, [sp], #16
+	ldp	x6, x7, [sp], #16
+	ldp	x4, x5, [sp], #16
+	ldp	x2, x3, [sp], #16
+	ldp	x0, x1, [sp], #16
+
+	eret
+	sb
+
+ENDPROC(el2_sync_cel)
diff --git a/runtime/core/exit.c b/runtime/core/exit.c
new file mode 100644
index 0000000..b16fc5a
--- /dev/null
+++ b/runtime/core/exit.c
@@ -0,0 +1,759 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <attestation_token.h>
+#include <buffer.h>
+#include <esr.h>
+#include <exit.h>
+#include <fpu_helpers.h>
+#include <gic.h>
+#include <granule.h>
+#include <inject_exp.h>
+#include <memory_alloc.h>
+#include <psci.h>
+#include <realm.h>
+#include <realm_attest.h>
+#include <rec.h>
+#include <rsi-config.h>
+#include <rsi-handler.h>
+#include <rsi-host-call.h>
+#include <rsi-logger.h>
+#include <rsi-memory.h>
+#include <rsi-walk.h>
+#include <smc-rmi.h>
+#include <smc-rsi.h>
+#include <status.h>
+#include <sve.h>
+#include <sysreg_traps.h>
+#include <table.h>
+
+void save_fpu_state(struct fpu_state *fpu);
+void restore_fpu_state(struct fpu_state *fpu);
+
+static void system_abort(void)
+{
+	/*
+	 * TODO: report the abort to the EL3.
+	 * We need to establish the exact EL3 API first.
+	 */
+	assert(false);
+}
+
+static bool fixup_aarch32_data_abort(struct rec *rec, unsigned long *esr)
+{
+	unsigned long spsr = read_spsr_el2();
+
+	if ((spsr & SPSR_EL2_nRW_AARCH32) != 0UL) {
+		/*
+		 * mmio emulation of AArch32 reads/writes is not supported.
+		 */
+		*esr &= ~ESR_EL2_ABORT_ISV_BIT;
+		return true;
+	}
+	return false;
+}
+
+static unsigned long get_dabt_write_value(struct rec *rec, unsigned long esr)
+{
+	unsigned int rt = esr_srt(esr);
+
+	/* Handle xzr */
+	if (rt == 31U) {
+		return 0UL;
+	}
+	return rec->regs[rt] & access_mask(esr);
+}
+
+/*
+ * Returns 'true' if access from @rec to @addr is within the Protected IPA space.
+ */
+static bool access_in_rec_par(struct rec *rec, unsigned long addr)
+{
+	/*
+	 * It is OK to check only the base address of the access because:
+	 * - The Protected IPA space starts at address zero.
+	 * - The IPA width is below 64 bits, therefore the access cannot
+	 *   wrap around.
+	 */
+	return addr_in_rec_par(rec, addr);
+}
+
+/*
+ * Returns 'true' if the @ipa is in PAR and its RIPAS is 'empty'.
+ *
+ * @ipa must be aligned to the granule size.
+ */
+static bool ipa_is_empty(unsigned long ipa, struct rec *rec)
+{
+	unsigned long s2tte, *ll_table;
+	struct rtt_walk wi;
+	enum ripas ripas;
+	bool ret;
+
+	assert(GRANULE_ALIGNED(ipa));
+
+	if (!addr_in_rec_par(rec, ipa)) {
+		return false;
+	}
+	granule_lock(rec->realm_info.g_rtt, GRANULE_STATE_RTT);
+
+	rtt_walk_lock_unlock(rec->realm_info.g_rtt,
+			     rec->realm_info.s2_starting_level,
+			     rec->realm_info.ipa_bits,
+			     ipa, RTT_PAGE_LEVEL, &wi);
+
+	ll_table = granule_map(wi.g_llt, SLOT_RTT);
+	s2tte = s2tte_read(&ll_table[wi.index]);
+
+	if (s2tte_is_destroyed(s2tte)) {
+		ret = false;
+		goto out_unmap_ll_table;
+	}
+	ripas = s2tte_get_ripas(s2tte);
+	ret = (ripas == RMI_EMPTY);
+
+out_unmap_ll_table:
+	buffer_unmap(ll_table);
+	granule_unlock(wi.g_llt);
+	return ret;
+}
+
+static bool fsc_is_external_abort(unsigned long fsc)
+{
+	if (fsc == ESR_EL2_ABORT_FSC_SEA) {
+		return true;
+	}
+
+	if ((fsc >= ESR_EL2_ABORT_FSC_SEA_TTW_START) &&
+	    (fsc <= ESR_EL2_ABORT_FSC_SEA_TTW_END)) {
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Handles Data/Instruction Aborts at a lower EL with an External Abort
+ * fault status code (D/IFSC).
+ * Returns 'true' if the exception is an external abort and the `rec_exit`
+ * structure is populated, 'false' otherwise.
+ */
+static bool handle_sync_external_abort(struct rec *rec,
+				       struct rmi_rec_exit *rec_exit,
+				       unsigned long esr)
+{
+	unsigned long fsc = esr & ESR_EL2_ABORT_FSC_MASK;
+	unsigned long set = esr & ESR_EL2_ABORT_SET_MASK;
+
+	if (!fsc_is_external_abort(fsc)) {
+		return false;
+	}
+
+	switch (set) {
+	case ESR_EL2_ABORT_SET_UER:
+		/*
+		 * The recoverable SEA.
+		 * Inject the synchronous abort into the Realm.
+		 * Report the exception to the host.
+		 */
+		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
+		/*
+		 * Fall through.
+		 */
+	case ESR_EL2_ABORT_SET_UEO:
+		/*
+		 * The restartable SEA.
+		 * Report the exception to the host.
+		 * The REC restarts the same instruction.
+		 */
+		rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;
+
+		/*
+		 * The value of the HPFAR_EL2 is not provided to the host as
+		 * it is undefined for external aborts.
+		 *
+		 * We also don't provide the content of FAR_EL2 because it
+		 * has no practical value to the host without the HPFAR_EL2.
+		 */
+		break;
+	case ESR_EL2_ABORT_SET_UC:
+		/*
+		 * The uncontainable SEA.
+		 * Fatal to the system.
+		 */
+		system_abort();
+		break;
+	default:
+		assert(false);
+	}
+
+	return true;
+}
+
+void emulate_stage2_data_abort(struct rec *rec,
+			       struct rmi_rec_exit *rec_exit,
+			       unsigned long rtt_level)
+{
+	unsigned long fipa = rec->regs[1];
+
+	assert(rtt_level <= RTT_PAGE_LEVEL);
+
+	/*
+	 * Set up the Exception Syndrome Register to emulate a real data
+	 * abort and return to the NS host to handle it.
+	 */
+	rec_exit->esr = (ESR_EL2_EC_DATA_ABORT |
+			(ESR_EL2_ABORT_FSC_TRANSLATION_FAULT_L0 + rtt_level));
+	rec_exit->far = 0UL;
+	rec_exit->hpfar = fipa >> HPFAR_EL2_FIPA_OFFSET;
+	rec_exit->exit_reason = RMI_EXIT_SYNC;
+}
+
+/*
+ * Returns 'true' if the abort is handled and the RMM should return to the
+ * Realm, and 'false' if the exception should be reported to the NS host.
+ */
+static bool handle_data_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
+			      unsigned long esr)
+{
+	unsigned long far = 0UL;
+	unsigned long hpfar = read_hpfar_el2();
+	unsigned long fipa = (hpfar & HPFAR_EL2_FIPA_MASK) << HPFAR_EL2_FIPA_OFFSET;
+	unsigned long write_val = 0UL;
+
+	if (handle_sync_external_abort(rec, rec_exit, esr)) {
+		/*
+		 * All external aborts are immediately reported to the host.
+		 */
+		return false;
+	}
+
+	/*
+	 * A memory access that crosses a page boundary may cause two
+	 * aborts, with `hpfar_el2` values referring to two consecutive
+	 * pages.
+	 *
+	 * Insert the SEA and return to the Realm if the granule's RIPAS
+	 * is EMPTY.
+	 */
+	if (ipa_is_empty(fipa, rec)) {
+		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
+		return true;
+	}
+
+	if (fixup_aarch32_data_abort(rec, &esr) ||
+	    access_in_rec_par(rec, fipa)) {
+		esr &= ESR_NONEMULATED_ABORT_MASK;
+		goto end;
+	}
+
+	if (esr_is_write(esr)) {
+		write_val = get_dabt_write_value(rec, esr);
+	}
+
+	far = read_far_el2() & ~GRANULE_MASK;
+	esr &= ESR_EMULATED_ABORT_MASK;
+
+end:
+	rec_exit->esr = esr;
+	rec_exit->far = far;
+	rec_exit->hpfar = hpfar;
+	rec_exit->gprs[0] = write_val;
+
+	return false;
+}
+
+/*
+ * Returns 'true' if the abort is handled and the RMM should return to the Realm,
+ * and returns 'false' if the exception should be reported to the NS host.
+ */
+static bool handle_instruction_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
+				     unsigned long esr)
+{
+	unsigned long fsc = esr & ESR_EL2_ABORT_FSC_MASK;
+	unsigned long fsc_type = fsc & ~ESR_EL2_ABORT_FSC_LEVEL_MASK;
+	unsigned long hpfar = read_hpfar_el2();
+	unsigned long fipa = (hpfar & HPFAR_EL2_FIPA_MASK) << HPFAR_EL2_FIPA_OFFSET;
+
+	if (handle_sync_external_abort(rec, rec_exit, esr)) {
+		/*
+		 * All external aborts are immediately reported to the host.
+		 */
+		return false;
+	}
+
+	/*
+	 * Insert the SEA and return to the Realm if:
+	 * - The instruction abort is at an Unprotected IPA, or
+	 * - The granule's RIPAS is EMPTY
+	 */
+	if (!access_in_rec_par(rec, fipa) || ipa_is_empty(fipa, rec)) {
+		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
+		return true;
+	}
+
+	if (fsc_type != ESR_EL2_ABORT_FSC_TRANSLATION_FAULT) {
+		unsigned long far = read_far_el2();
+
+		/*
+		 * TODO: Should this ever happen, or is it an indication of an
+		 * internal consistency failure in the RMM which should lead
+		 * to a panic instead?
+		 */
+
+		ERROR("Unhandled instruction abort:\n");
+		ERROR("    FSC: %12s0x%02lx\n", " ", fsc);
+		ERROR("    FAR: %16lx\n", far);
+		ERROR("  HPFAR: %16lx\n", hpfar);
+		return false;
+	}
+
+	rec_exit->hpfar = hpfar;
+	rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;
+
+	return false;
+}
+
+/*
+ * Return 'false' if no IRQ is pending, and 'true' if there is an IRQ
+ * pending and we need to return to the host.
+ */
+static bool check_pending_irq(void)
+{
+	unsigned long pending_irq;
+
+	pending_irq = read_isr_el1();
+
+	return (pending_irq != 0UL);
+}
+
+static void advance_pc(void)
+{
+	unsigned long pc = read_elr_el2();
+
+	write_elr_el2(pc + 4UL);
+}
+
+static void return_result_to_realm(struct rec *rec, struct smc_result result)
+{
+	rec->regs[0] = result.x[0];
+	rec->regs[1] = result.x[1];
+	rec->regs[2] = result.x[2];
+	rec->regs[3] = result.x[3];
+}
+
+/*
+ * Return 'true' if execution should continue in the REC, otherwise return
+ * 'false' to go back to the NS caller of REC.Enter.
+ */
+static bool handle_realm_rsi(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
+	bool ret_to_rec = true;	/* Return to Realm */
+	unsigned int function_id = rec->regs[0];
+
+	RSI_LOG_SET(rec->regs[1], rec->regs[2],
+		    rec->regs[3], rec->regs[4], rec->regs[5]);
+
+	if (!IS_SMC32_PSCI_FID(function_id) &&
+	    !IS_SMC64_PSCI_FID(function_id) &&
+	    !IS_SMC64_RSI_FID(function_id)) {
+
+		ERROR("Invalid RSI function_id = %x\n", function_id);
+		rec->regs[0] = SMC_UNKNOWN;
+		return true;
+	}
+
+	switch (function_id) {
+	case SMCCC_VERSION:
+		rec->regs[0] = SMCCC_VERSION_NUMBER;
+		break;
+	case SMC_RSI_ABI_VERSION:
+		rec->regs[0] = system_rsi_abi_version();
+		break;
+	case SMC32_PSCI_FID_MIN ... SMC32_PSCI_FID_MAX:
+	case SMC64_PSCI_FID_MIN ... SMC64_PSCI_FID_MAX: {
+		struct psci_result res;
+
+		res = psci_rsi(rec,
+			       function_id,
+			       rec->regs[1],
+			       rec->regs[2],
+			       rec->regs[3]);
+
+		if (!rec->psci_info.pending) {
+			rec->regs[0] = res.smc_res.x[0];
+			rec->regs[1] = res.smc_res.x[1];
+			rec->regs[2] = res.smc_res.x[2];
+			rec->regs[3] = res.smc_res.x[3];
+		}
+
+		if (res.hvc_forward.forward_psci_call) {
+			unsigned int i;
+
+			rec_exit->exit_reason = RMI_EXIT_PSCI;
+			rec_exit->gprs[0] = function_id;
+			rec_exit->gprs[1] = res.hvc_forward.x1;
+			rec_exit->gprs[2] = res.hvc_forward.x2;
+			rec_exit->gprs[3] = res.hvc_forward.x3;
+
+			for (i = 4U; i < REC_EXIT_NR_GPRS; i++) {
+				rec_exit->gprs[i] = 0UL;
+			}
+
+			advance_pc();
+			ret_to_rec = false;
+		}
+		break;
+	}
+	case SMC_RSI_ATTEST_TOKEN_INIT:
+		rec->regs[0] = handle_rsi_attest_token_init(rec);
+		break;
+	case SMC_RSI_ATTEST_TOKEN_CONTINUE: {
+		struct attest_result res;
+
+		attest_realm_token_sign_continue_start();
+		while (true) {
+			/*
+			 * Possible outcomes:
+			 *     if res.incomplete is true
+			 *         if IRQ pending
+			 *             check for pending IRQ and return to host
+			 *         else try a new iteration
+			 *     else
+			 *         if RTT table walk has failed,
+			 *             emulate data abort back to host
+			 *         otherwise
+			 *             return to realm because the token
+			 *             creation is complete or input parameter
+			 *             validation failed.
+			 */
+			handle_rsi_attest_token_continue(rec, &res);
+
+			if (res.incomplete) {
+				if (check_pending_irq()) {
+					rec_exit->exit_reason = RMI_EXIT_IRQ;
+					/* Return to NS host to handle IRQ. */
+					ret_to_rec = false;
+					break;
+				}
+			} else {
+				if (res.walk_result.abort) {
+					emulate_stage2_data_abort(
+						rec, rec_exit,
+						res.walk_result.rtt_level);
+					ret_to_rec = false; /* Exit to Host */
+					break;
+				}
+
+				/* Return to Realm */
+				return_result_to_realm(rec, res.smc_res);
+				break;
+			}
+		}
+		attest_realm_token_sign_continue_finish();
+		break;
+	}
+	case SMC_RSI_MEASUREMENT_READ:
+		rec->regs[0] = handle_rsi_read_measurement(rec);
+		break;
+	case SMC_RSI_MEASUREMENT_EXTEND:
+		rec->regs[0] = handle_rsi_extend_measurement(rec);
+		break;
+	case SMC_RSI_REALM_CONFIG: {
+		struct rsi_config_result res;
+
+		res = handle_rsi_realm_config(rec);
+		if (res.walk_result.abort) {
+			emulate_stage2_data_abort(rec, rec_exit,
+						  res.walk_result.rtt_level);
+			ret_to_rec = false; /* Exit to Host */
+		} else {
+			/* Return to Realm */
+			return_result_to_realm(rec, res.smc_res);
+		}
+		break;
+	}
+	case SMC_RSI_IPA_STATE_SET:
+		if (handle_rsi_ipa_state_set(rec, rec_exit)) {
+			rec->regs[0] = RSI_ERROR_INPUT;
+		} else {
+			advance_pc();
+			ret_to_rec = false; /* Return to Host */
+		}
+		break;
+	case SMC_RSI_IPA_STATE_GET: {
+		enum ripas ripas;
+
+		rec->regs[0] = handle_rsi_ipa_state_get(rec, rec->regs[1],
+							&ripas);
+		if (rec->regs[0] == RSI_SUCCESS) {
+			rec->regs[1] = ripas;
+		}
+		break;
+	}
+	case SMC_RSI_HOST_CALL: {
+		struct rsi_host_call_result res;
+
+		res = handle_rsi_host_call(rec, rec_exit);
+
+		if (res.walk_result.abort) {
+			emulate_stage2_data_abort(rec, rec_exit,
+						  res.walk_result.rtt_level);
+		} else {
+			rec->regs[0] = res.smc_result;
+
+			/*
+			 * In case of error, return to the Realm; the
+			 * caller (handle_exception_sync) advances the PC.
+			 */
+			if (rec->regs[0] == RSI_SUCCESS) {
+				advance_pc();
+
+				/* Exit to Host */
+				rec->host_call = true;
+				rec_exit->exit_reason = RMI_EXIT_HOST_CALL;
+				ret_to_rec = false;
+			}
+		}
+		break;
+	}
+
+	default:
+		rec->regs[0] = SMC_UNKNOWN;
+		break;
+	}
+
+	/* Log RSI call */
+	RSI_LOG_EXIT(function_id, rec->regs[0], ret_to_rec);
+	return ret_to_rec;
+}
+
+/*
+ * Return 'true' if the RMM handled the exception,
+ * 'false' to return to the Non-secure host.
+ */
+static bool handle_exception_sync(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
+	const unsigned long esr = read_esr_el2();
+
+	switch (esr & ESR_EL2_EC_MASK) {
+	case ESR_EL2_EC_WFX:
+		rec_exit->esr = esr & (ESR_EL2_EC_MASK | ESR_EL2_WFx_TI_BIT);
+		advance_pc();
+		return false;
+	case ESR_EL2_EC_HVC:
+		realm_inject_undef_abort();
+		return true;
+	case ESR_EL2_EC_SMC:
+		if (!handle_realm_rsi(rec, rec_exit)) {
+			return false;
+		}
+		/*
+		 * Advance PC.
+		 * HCR_EL2.TSC traps execution of the SMC instruction.
+		 * It is not a routing control for the SMC exception.
+		 * Trap exceptions and SMC exceptions have different
+		 * preferred return addresses.
+		 */
+		advance_pc();
+		return true;
+	case ESR_EL2_EC_SYSREG: {
+		bool ret = handle_sysreg_access_trap(rec, rec_exit, esr);
+
+		advance_pc();
+		return ret;
+	}
+	case ESR_EL2_EC_INST_ABORT:
+		return handle_instruction_abort(rec, rec_exit, esr);
+	case ESR_EL2_EC_DATA_ABORT:
+		return handle_data_abort(rec, rec_exit, esr);
+	case ESR_EL2_EC_FPU: {
+		unsigned long cptr;
+
+		/*
+		 * Realm has requested FPU/SIMD access, so save NS state and
+		 * load realm state.  Start by disabling traps so we can save
+		 * the NS state and load the realm state.
+		 */
+		cptr = read_cptr_el2();
+		cptr &= ~(CPTR_EL2_FPEN_MASK << CPTR_EL2_FPEN_SHIFT);
+		cptr |= (CPTR_EL2_FPEN_NO_TRAP_11 << CPTR_EL2_FPEN_SHIFT);
+		cptr &= ~(CPTR_EL2_ZEN_MASK << CPTR_EL2_ZEN_SHIFT);
+		cptr |= (CPTR_EL2_ZEN_NO_TRAP_11 << CPTR_EL2_ZEN_SHIFT);
+		write_cptr_el2(cptr);
+
+		/*
+		 * Save NS state, restore realm state, and set flag indicating
+		 * realm has used FPU so we know to save and restore NS state at
+		 * realm exit.
+		 */
+		if (rec->ns->sve != NULL) {
+			save_sve_state(rec->ns->sve);
+		} else {
+			assert(rec->ns->fpu != NULL);
+			fpu_save_state(rec->ns->fpu);
+		}
+		fpu_restore_state(&rec->fpu_ctx.fpu);
+		rec->fpu_ctx.used = true;
+
+		/*
+		 * Disable SVE for now, until per rec save/restore is
+		 * implemented
+		 */
+		cptr = read_cptr_el2();
+		cptr &= ~(CPTR_EL2_ZEN_MASK << CPTR_EL2_ZEN_SHIFT);
+		cptr |= (CPTR_EL2_ZEN_TRAP_ALL_00 << CPTR_EL2_ZEN_SHIFT);
+		write_cptr_el2(cptr);
+
+		/*
+		 * Return 'true' indicating that this exception
+		 * has been handled and execution can continue.
+		 */
+		return true;
+	}
+	default:
+		/*
+		 * TODO: Check if there are other exit reasons we could
+		 * encounter here and handle them appropriately
+		 */
+		break;
+	}
+
+	VERBOSE("Unhandled sync exit ESR: %08lx (EC: %lx ISS: %lx)\n",
+		esr,
+		(esr & ESR_EL2_EC_MASK) >> ESR_EL2_EC_SHIFT,
+		(esr & ESR_EL2_ISS_MASK) >> ESR_EL2_ISS_SHIFT);
+
+	/*
+	 * Zero values in esr, far & hpfar of 'rec_exit' structure
+	 * will be returned to the NS host.
+	 * The only information that may leak is when there was
+	 * some unhandled/unknown reason for the exception.
+	 */
+	return false;
+}
+
+/*
+ * Return 'true' if the RMM handled the exception, 'false' to return to the
+ * Non-secure host.
+ */
+static bool handle_exception_serror_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
+	const unsigned long esr = read_esr_el2();
+
+	if (esr & ESR_EL2_SERROR_IDS_BIT) {
+		/*
+		 * Implementation defined content of the esr.
+		 */
+		system_abort();
+	}
+
+	if ((esr & ESR_EL2_SERROR_DFSC_MASK) != ESR_EL2_SERROR_DFSC_ASYNC) {
+		/*
+		 * Either Uncategorized or Reserved fault status code.
+		 */
+		system_abort();
+	}
+
+	switch (esr & ESR_EL2_SERROR_AET_MASK) {
+	case ESR_EL2_SERROR_AET_UEU:	/* Unrecoverable RAS Error */
+	case ESR_EL2_SERROR_AET_UER:	/* Recoverable RAS Error */
+		/*
+		 * The abort is fatal to the current S/W. Inject the SError
+		 * into the Realm so it can e.g. shut down gracefully or
+		 * localize the problem at the specific EL0 application.
+		 *
+		 * Note: Consider shutting down the Realm here to prevent
+		 * the host from attacking an unstable Realm.
+		 */
+		inject_serror(rec, esr);
+		/*
+		 * Fall through.
+		 */
+	case ESR_EL2_SERROR_AET_CE:	/* Corrected RAS Error */
+	case ESR_EL2_SERROR_AET_UEO:	/* Restartable RAS Error */
+		/*
+		 * Report the exception to the host.
+		 */
+		rec_exit->esr = esr & ESR_SERROR_MASK;
+		break;
+	case ESR_EL2_SERROR_AET_UC:	/* Uncontainable RAS Error */
+		system_abort();
+		break;
+	default:
+		/*
+		 * Unrecognized Asynchronous Error Type
+		 */
+		assert(false);
+	}
+
+	return false;
+}
+
+static bool handle_exception_irq_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
+	(void)rec;
+
+	rec_exit->exit_reason = RMI_EXIT_IRQ;
+
+	/*
+	 * With GIC all virtual interrupt programming
+	 * must go via the NS hypervisor.
+	 */
+	return false;
+}
+
+/* Returns 'true' when returning to the Realm and 'false' when returning to NS */
+bool handle_realm_exit(struct rec *rec, struct rmi_rec_exit *rec_exit, int exception)
+{
+	switch (exception) {
+	case ARM_EXCEPTION_SYNC_LEL: {
+		bool ret;
+
+		/*
+		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
+		 * information.
+		 */
+		rec_exit->exit_reason = RMI_EXIT_SYNC;
+		ret = handle_exception_sync(rec, rec_exit);
+		if (!ret) {
+			rec->last_run_info.esr = read_esr_el2();
+			rec->last_run_info.far = read_far_el2();
+			rec->last_run_info.hpfar = read_hpfar_el2();
+		}
+		return ret;
+
+		/*
+		 * TODO: Much more detailed handling of exit reasons.
+		 */
+	}
+	case ARM_EXCEPTION_IRQ_LEL:
+		return handle_exception_irq_lel(rec, rec_exit);
+	case ARM_EXCEPTION_FIQ_LEL:
+		rec_exit->exit_reason = RMI_EXIT_FIQ;
+		break;
+	case ARM_EXCEPTION_SERROR_LEL: {
+		const unsigned long esr = read_esr_el2();
+		bool ret;
+
+		/*
+		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
+		 * information.
+		 */
+		rec_exit->exit_reason = RMI_EXIT_SERROR;
+		ret = handle_exception_serror_lel(rec, rec_exit);
+		if (!ret) {
+			rec->last_run_info.esr = esr;
+			rec->last_run_info.far = read_far_el2();
+			rec->last_run_info.hpfar = read_hpfar_el2();
+		}
+		return ret;
+	}
+	default:
+		INFO("Unrecognized exit reason: %d\n", exception);
+		break;
+	}
+
+	return false;
+}
diff --git a/runtime/core/fake_host/runtime_core_stub.c b/runtime/core/fake_host/runtime_core_stub.c
new file mode 100644
index 0000000..e71ca2e
--- /dev/null
+++ b/runtime/core/fake_host/runtime_core_stub.c
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <host_harness.h>
+#include <run.h>
+
+bool memcpy_ns_read(void *dest, const void *ns_src, unsigned long size)
+{
+	return host_memcpy_ns_read(dest, ns_src, size);
+}
+
+bool memcpy_ns_write(void *ns_dest, const void *src, unsigned long size)
+{
+	return host_memcpy_ns_write(ns_dest, src, size);
+}
+
+int run_realm(unsigned long *regs)
+{
+	return host_run_realm(regs);
+}
diff --git a/runtime/core/handler.c b/runtime/core/handler.c
new file mode 100644
index 0000000..b7f3a55
--- /dev/null
+++ b/runtime/core/handler.c
@@ -0,0 +1,388 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <buffer.h>
+#include <debug.h>
+#include <sizes.h>
+#include <smc-handler.h>
+#include <smc-rmi.h>
+#include <smc.h>
+#include <status.h>
+#include <utils_def.h>
+
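+/*
+ * Expands to a designated initializer, e.g.
+ * STATUS_HANDLER(RMI_SUCCESS) becomes [RMI_SUCCESS] = "RMI_SUCCESS".
+ */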
+#define STATUS_HANDLER(_id)[_id] = #_id
+
+const char *status_handler[] = {
+	STATUS_HANDLER(RMI_SUCCESS),
+	STATUS_HANDLER(RMI_ERROR_INPUT),
+	STATUS_HANDLER(RMI_ERROR_REALM),
+	STATUS_HANDLER(RMI_ERROR_REC),
+	STATUS_HANDLER(RMI_ERROR_RTT),
+	STATUS_HANDLER(RMI_ERROR_IN_USE)
+};
+COMPILER_ASSERT(ARRAY_LEN(status_handler) == RMI_ERROR_COUNT);
+
+/*
+ * At this level (in handle_ns_smc) we distinguish the RMI calls only by:
+ * - the number of input arguments [0..4], and
+ * - whether the function returns up to three output values in addition
+ *   to the return status code.
+ * Hence, the naming syntax is:
+ * - `*_[0..4]` when no output values are returned, and
+ * - `*_[0..4]_o` when the function returns some output values.
+ */
+
+typedef unsigned long (*handler_0)(void);
+typedef unsigned long (*handler_1)(unsigned long arg0);
+typedef unsigned long (*handler_2)(unsigned long arg0, unsigned long arg1);
+typedef unsigned long (*handler_3)(unsigned long arg0, unsigned long arg1,
+				   unsigned long arg2);
+typedef unsigned long (*handler_4)(unsigned long arg0, unsigned long arg1,
+				   unsigned long arg2, unsigned long arg3);
+typedef unsigned long (*handler_5)(unsigned long arg0, unsigned long arg1,
+				   unsigned long arg2, unsigned long arg3,
+				   unsigned long arg4);
+typedef void (*handler_1_o)(unsigned long arg0, struct smc_result *ret);
+typedef void (*handler_3_o)(unsigned long arg0, unsigned long arg1,
+			    unsigned long arg2, struct smc_result *ret);
+
+enum rmi_type {
+	rmi_type_0,
+	rmi_type_1,
+	rmi_type_2,
+	rmi_type_3,
+	rmi_type_4,
+	rmi_type_5,
+	rmi_type_1_o,
+	rmi_type_3_o
+};
+
+struct smc_handler {
+	const char	*fn_name;
+	enum rmi_type	type;
+	union {
+		handler_0	f0;
+		handler_1	f1;
+		handler_2	f2;
+		handler_3	f3;
+		handler_4	f4;
+		handler_5	f5;
+		handler_1_o	f1_o;
+		handler_3_o	f3_o;
+		void		*fn_dummy;
+	};
+	bool		log_exec;	/* print handler execution */
+	bool		log_error;	/* print in case of error status */
+	unsigned int	out_values;	/* number of output values */
+};
+
+/*
+ * Get handler ID from FID
+ * Precondition: FID is an RMI call
+ */
+#define SMC_RMI_HANDLER_ID(_fid) SMC64_FID_OFFSET_FROM_RANGE_MIN(RMI, _fid)
+
+#define HANDLER_0(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_0, .f0 = _fn, .log_exec = _exec, .log_error = _error,	   \
+	.out_values = 0U }
+#define HANDLER_1(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_1, .f1 = _fn, .log_exec = _exec, .log_error = _error,	   \
+	.out_values = 0U }
+#define HANDLER_2(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_2, .f2 = _fn, .log_exec = _exec, .log_error = _error,     \
+	.out_values = 0U }
+#define HANDLER_3(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_3, .f3 = _fn, .log_exec = _exec, .log_error = _error,	   \
+	.out_values = 0U }
+#define HANDLER_4(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_4, .f4 = _fn, .log_exec = _exec, .log_error = _error,	   \
+	.out_values = 0U }
+#define HANDLER_5(_id, _fn, _exec, _error)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_5, .f5 = _fn, .log_exec = _exec, .log_error = _error,	   \
+	.out_values = 0U }
+#define HANDLER_1_O(_id, _fn, _exec, _error, _values)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_1_o, .f1_o = _fn, .log_exec = _exec, .log_error = _error, \
+	.out_values = _values }
+#define HANDLER_3_O(_id, _fn, _exec, _error, _values)[SMC_RMI_HANDLER_ID(_id)] = { \
+	.fn_name = #_id, \
+	.type = rmi_type_3_o, .f3_o = _fn, .log_exec = _exec, .log_error = _error, \
+	.out_values = _values }
+
+/*
+ * The 3rd argument of each entry enables the execution log.
+ * The 4th argument enables the error log.
+ */
+static const struct smc_handler smc_handlers[] = {
+	HANDLER_0(SMC_RMM_VERSION,		 smc_version,			true,  true),
+	HANDLER_1_O(SMC_RMM_FEATURES,		 smc_read_feature_register,	true,  true, 1U),
+	HANDLER_1(SMC_RMM_GRANULE_DELEGATE,	 smc_granule_delegate,		false, true),
+	HANDLER_1(SMC_RMM_GRANULE_UNDELEGATE,	 smc_granule_undelegate,	false, true),
+	HANDLER_2(SMC_RMM_REALM_CREATE,		 smc_realm_create,		true,  true),
+	HANDLER_1(SMC_RMM_REALM_DESTROY,	 smc_realm_destroy,		true,  true),
+	HANDLER_1(SMC_RMM_REALM_ACTIVATE,	 smc_realm_activate,		true,  true),
+	HANDLER_3(SMC_RMM_REC_CREATE,		 smc_rec_create,		true,  true),
+	HANDLER_1(SMC_RMM_REC_DESTROY,		 smc_rec_destroy,		true,  true),
+	HANDLER_2(SMC_RMM_REC_ENTER,		 smc_rec_enter,			false, true),
+	HANDLER_5(SMC_RMM_DATA_CREATE,		 smc_data_create,		false, false),
+	HANDLER_3(SMC_RMM_DATA_CREATE_UNKNOWN,	 smc_data_create_unknown,	false, false),
+	HANDLER_2(SMC_RMM_DATA_DESTROY,		 smc_data_destroy,		false, true),
+	HANDLER_4(SMC_RMM_RTT_CREATE,		 smc_rtt_create,		false, true),
+	HANDLER_4(SMC_RMM_RTT_DESTROY,		 smc_rtt_destroy,		false, true),
+	HANDLER_4(SMC_RMM_RTT_FOLD,		 smc_rtt_fold,			false, true),
+	HANDLER_4(SMC_RMM_RTT_MAP_UNPROTECTED,	 smc_rtt_map_unprotected,	false, false),
+	HANDLER_3(SMC_RMM_RTT_UNMAP_UNPROTECTED, smc_rtt_unmap_unprotected,	false, false),
+	HANDLER_3_O(SMC_RMM_RTT_READ_ENTRY,	 smc_rtt_read_entry,		false, true, 4U),
+	HANDLER_2(SMC_RMM_PSCI_COMPLETE,	 smc_psci_complete,		true,  true),
+	HANDLER_1_O(SMC_RMM_REC_AUX_COUNT,	 smc_rec_aux_count,		true,  true, 1U),
+	HANDLER_3(SMC_RMM_RTT_INIT_RIPAS,	 smc_rtt_init_ripas,		false, true),
+	HANDLER_5(SMC_RMM_RTT_SET_RIPAS,	 smc_rtt_set_ripas,		false, true)
+};
+
+COMPILER_ASSERT(ARRAY_LEN(smc_handlers) == SMC64_NUM_FIDS_IN_RANGE(RMI));
+
+static bool rmi_call_log_enabled = true;
+
+static void rmi_log_on_exit(unsigned long handler_id,
+			    unsigned long arg0,
+			    unsigned long arg1,
+			    unsigned long arg2,
+			    unsigned long arg3,
+			    unsigned long arg4,
+			    struct smc_result *ret)
+{
+	const struct smc_handler *handler = &smc_handlers[handler_id];
+	unsigned long function_id = SMC64_RMI_FID(handler_id);
+	unsigned int i;
+	return_code_t rc;
+
+	if (!handler->log_exec && !handler->log_error) {
+		return;
+	}
+
+	if (function_id == SMC_RMM_VERSION) {
+		/*
+		 * RMM_VERSION is special because it returns the
+		 * version number, not the error code.
+		 */
+		INFO("%-29s %8lx %8lx %8lx %8lx %8lx > %lx\n",
+		     handler->fn_name, arg0, arg1, arg2, arg3, arg4,
+		     ret->x[0]);
+		return;
+	}
+
+	rc = unpack_return_code(ret->x[0]);
+
+	if ((handler->log_exec) ||
+	    (handler->log_error && (rc.status != RMI_SUCCESS))) {
+		INFO("%-29s %8lx %8lx %8lx %8lx %8lx > ",
+			handler->fn_name, arg0, arg1, arg2, arg3, arg4);
+		if (rc.status >= RMI_ERROR_COUNT) {
+			INFO("%lx", ret->x[0]);
+		} else {
+			INFO("%s", status_handler[rc.status]);
+		}
+
+		/* Check for index */
+		if (((function_id == SMC_RMM_REC_ENTER) &&
+		     (rc.status == RMI_ERROR_REALM)) ||
+		     (rc.status == RMI_ERROR_RTT)) {
+			INFO(" %x", rc.index);
+		}
+
+		/* Print output values */
+		for (i = 1U; i <= handler->out_values; i++) {
+			INFO(" %8lx", ret->x[i]);
+		}
+
+		INFO("\n");
+	}
+}
+
+void handle_ns_smc(unsigned long function_id,
+		   unsigned long arg0,
+		   unsigned long arg1,
+		   unsigned long arg2,
+		   unsigned long arg3,
+		   unsigned long arg4,
+		   unsigned long arg5,
+		   struct smc_result *ret)
+{
+	unsigned long handler_id;
+	const struct smc_handler *handler = NULL;
+
+	if (IS_SMC64_RMI_FID(function_id)) {
+		handler_id = SMC_RMI_HANDLER_ID(function_id);
+		if (handler_id < ARRAY_LEN(smc_handlers)) {
+			handler = &smc_handlers[handler_id];
+		}
+	}
+
+	/*
+	 * Check that the handler exists and that 'fn_dummy' is not NULL,
+	 * to catch unimplemented 'function_id' calls in the SMC RMI range.
+	 */
+	if ((handler == NULL) || (handler->fn_dummy == NULL)) {
+		VERBOSE("[%s] unknown function_id: %lx\n",
+			__func__, function_id);
+		ret->x[0] = SMC_UNKNOWN;
+		return;
+	}
+
+	assert_cpu_slots_empty();
+
+	switch (handler->type) {
+	case rmi_type_0:
+		ret->x[0] = handler->f0();
+		break;
+	case rmi_type_1:
+		ret->x[0] = handler->f1(arg0);
+		break;
+	case rmi_type_2:
+		ret->x[0] = handler->f2(arg0, arg1);
+		break;
+	case rmi_type_3:
+		ret->x[0] = handler->f3(arg0, arg1, arg2);
+		break;
+	case rmi_type_4:
+		ret->x[0] = handler->f4(arg0, arg1, arg2, arg3);
+		break;
+	case rmi_type_5:
+		ret->x[0] = handler->f5(arg0, arg1, arg2, arg3, arg4);
+		break;
+	case rmi_type_1_o:
+		handler->f1_o(arg0, ret);
+		break;
+	case rmi_type_3_o:
+		handler->f3_o(arg0, arg1, arg2, ret);
+		break;
+	default:
+		assert(false);
+	}
+
+	if (rmi_call_log_enabled) {
+		rmi_log_on_exit(handler_id, arg0, arg1, arg2, arg3, arg4, ret);
+	}
+
+	assert_cpu_slots_empty();
+}
+
+static void report_unexpected(void)
+{
+	unsigned long spsr = read_spsr_el2();
+	unsigned long esr = read_esr_el2();
+	unsigned long elr = read_elr_el2();
+	unsigned long far = read_far_el2();
+
+	INFO("----\n");
+	INFO("Unexpected exception:\n");
+	INFO("SPSR_EL2: 0x%016lx\n", spsr);
+	INFO("ESR_EL2:  0x%016lx\n", esr);
+	INFO("ELR_EL2:  0x%016lx\n", elr);
+	INFO("FAR_EL2:  0x%016lx\n", far);
+	INFO("----\n");
+
+}
+
+unsigned long handle_realm_trap(unsigned long *regs)
+{
+	(void)regs;
+
+	report_unexpected();
+
+	while (1) {
+		wfe();
+	}
+}
+
+/*
+ * Identifies an abort that the RMM may recover from.
+ */
+struct rmm_trap_element {
+	/*
+	 * The PC at the time of abort.
+	 */
+	unsigned long aborted_pc;
+	/*
+	 * New value of the PC.
+	 */
+	unsigned long new_pc;
+};
+
+#define RMM_TRAP_HANDLER(_aborted_pc, _new_pc) \
+	{ .aborted_pc = (unsigned long)(&_aborted_pc), \
+	  .new_pc = (unsigned long)(&_new_pc) }
+
+/*
+ * The registered locations of load/store instructions that access NS memory.
+ */
+extern void *ns_read;
+extern void *ns_write;
+
+/*
+ * The new value of the PC when the GPF occurs on a registered location.
+ */
+extern void *ns_access_ret_0;
+
+struct rmm_trap_element rmm_trap_list[] = {
+	RMM_TRAP_HANDLER(ns_read, ns_access_ret_0),
+	RMM_TRAP_HANDLER(ns_write, ns_access_ret_0),
+};
+#define RMM_TRAP_LIST_SIZE (sizeof(rmm_trap_list)/sizeof(struct rmm_trap_element))
+
+static void fatal_abort(void)
+{
+	report_unexpected();
+
+	while (1) {
+		wfe();
+	}
+}
+
+static bool is_el2_data_abort_gpf(unsigned long esr)
+{
+	if (((esr & ESR_EL2_EC_MASK) == ESR_EL2_EC_DATA_ABORT_SEL) &&
+	    ((esr & ESR_EL2_ABORT_FSC_MASK) == ESR_EL2_ABORT_FSC_GPF))
+		return true;
+	return false;
+}
+
+/*
+ * Handles the RMM's aborts.
+ * It compares the PC at the time of the abort with the registered addresses.
+ * If it finds a match, it returns the new value of the PC that the RMM should
+ * continue from. Other register values are preserved.
+ * If no match is found, it aborts the RMM.
+ */
+unsigned long handle_rmm_trap(void)
+{
+	unsigned long i;
+	unsigned long esr = read_esr_el2();
+	unsigned long elr = read_elr_el2();
+
+	/*
+	 * Only the GPF data aborts are recoverable.
+	 */
+	if (!is_el2_data_abort_gpf(esr)) {
+		fatal_abort();
+	}
+
+	for (i = 0; i < RMM_TRAP_LIST_SIZE; i++) {
+		if (rmm_trap_list[i].aborted_pc == elr) {
+			return rmm_trap_list[i].new_pc;
+		}
+	}
+
+	fatal_abort();
+	return 0;
+}
diff --git a/runtime/core/init.c b/runtime/core/init.c
new file mode 100644
index 0000000..ad86fbc
--- /dev/null
+++ b/runtime/core/init.c
@@ -0,0 +1,88 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch_helpers.h>
+#include <attestation.h>
+#include <buffer.h>
+#include <debug.h>
+#include <rmm_el3_ifc.h>
+#include <smc-rmi.h>
+#include <smc-rsi.h>
+
+#ifdef NDEBUG
+#define RMM_BUILD_TYPE	"release"
+#else
+#define RMM_BUILD_TYPE	"debug"
+#endif
+
+#define VER_STRING(toolchain, major, minor, patch) \
+		toolchain __STRING(major) "." \
+		__STRING(minor) "." __STRING(patch)
+
+static void rmm_arch_init(void)
+{
+	MPAM(write_mpam2_el2(MPAM2_EL2_INIT));
+	MPAM(write_mpamhcr_el2(MPAMHCR_EL2_INIT));
+	SPE(write_pmscr_el2(PMSCR_EL2_INIT));
+
+	write_cnthctl_el2(CNTHCTL_EL2_INIT);
+	write_mdcr_el2(MDCR_EL2_INIT);
+}
+
+void rmm_warmboot_main(void)
+{
+	/*
+	 * Do the rest of RMM architecture init
+	 */
+	rmm_arch_init();
+
+	/*
+	 * Finish initializing the slot buffer mechanism
+	 */
+	slot_buf_init();
+}
+
+void rmm_main(void)
+{
+	unsigned int rmm_el3_ifc_version = rmm_el3_ifc_get_version();
+	unsigned int manifest_version = rmm_el3_ifc_get_manifest_version();
+
+	/*
+	 * Report project name, version, build type and
+	 * commit information if it is present
+	 */
+	NOTICE("Booting %s v.%s(%s) %s Built with %s\n",
+		NAME, VERSION, RMM_BUILD_TYPE, COMMIT_INFO,
+#ifdef __clang__
+	VER_STRING("Clang ", __clang_major__, __clang_minor__,
+		__clang_patchlevel__)
+#else
+	VER_STRING("GCC ", __GNUC__, __GNUC_MINOR__,
+		__GNUC_PATCHLEVEL__)
+#endif
+		);
+
+	/* Report Boot Interface version */
+	NOTICE("RMM-EL3 Interface v.%u.%u\n",
+		RMM_EL3_IFC_GET_VERS_MAJOR(rmm_el3_ifc_version),
+		RMM_EL3_IFC_GET_VERS_MINOR(rmm_el3_ifc_version));
+
+	/* Report Boot Manifest version */
+	NOTICE("Boot Manifest Interface v.%u.%u\n",
+		RMM_EL3_MANIFEST_GET_VERS_MAJOR(manifest_version),
+		RMM_EL3_MANIFEST_GET_VERS_MINOR(manifest_version));
+
+	/* Report RMI/RSI ABI versions and build timestamp */
+	NOTICE("RMI/RSI ABI v.%u.%u/%u.%u built: %s %s\n",
+		RMI_ABI_VERSION_MAJOR, RMI_ABI_VERSION_MINOR,
+		RSI_ABI_VERSION_MAJOR, RSI_ABI_VERSION_MINOR,
+		__DATE__, __TIME__);
+
+	rmm_warmboot_main();
+
+	if (attestation_init() != 0) {
+		WARN("Attestation init failed.\n");
+	}
+}
diff --git a/runtime/core/inject_exp.c b/runtime/core/inject_exp.c
new file mode 100644
index 0000000..cc818f8
--- /dev/null
+++ b/runtime/core/inject_exp.c
@@ -0,0 +1,169 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <inject_exp.h>
+#include <rec.h>
+
+/*
+ * Calculate the address of the vector entry when an exception is inserted
+ * into the Realm.
+ *
+ * @vbar The base address of the vector table in the Realm.
+ * @spsr The Saved Program Status Register at EL2.
+ */
+static unsigned long calc_vector_entry(unsigned long vbar, unsigned long spsr)
+{
+	unsigned long offset;
+
+	if ((spsr & MASK(SPSR_EL2_MODE)) == SPSR_EL2_MODE_EL1h) {
+		offset = VBAR_CEL_SP_ELx_OFFSET;
+	} else if ((spsr & MASK(SPSR_EL2_MODE)) == SPSR_EL2_MODE_EL1t) {
+		offset = VBAR_CEL_SP_EL0_OFFSET;
+	} else if ((spsr & MASK(SPSR_EL2_MODE)) == SPSR_EL2_MODE_EL0t) {
+		if ((spsr & MASK(SPSR_EL2_nRW)) == SPSR_EL2_nRW_AARCH64) {
+			offset = VBAR_LEL_AA64_OFFSET;
+		} else {
+			offset = VBAR_LEL_AA32_OFFSET;
+		}
+	} else {
+		assert(false);
+		offset = 0UL;
+	}
+
+	return vbar + offset;
+}
+
+/*
+ * Calculate the value of the pstate when an exception
+ * is inserted into the Realm.
+ */
+static unsigned long calc_pstate(void)
+{
+	/*
+	 * The pstate is EL1, AArch64, SPSel = SP_ELx and:
+	 * DAIF = '1111b'
+	 * NZCV = '0000b'
+	 * TODO: setup TCO, DIT, UAO, PAN, SSBS, BTYPE
+	 */
+	unsigned long pstate = SPSR_EL2_MODE_EL1h |
+			       SPSR_EL2_nRW_AARCH64 |
+			       SPSR_EL2_F_BIT |
+			       SPSR_EL2_I_BIT |
+			       SPSR_EL2_A_BIT |
+			       SPSR_EL2_D_BIT;
+	return pstate;
+}
+
+/*
+ * Calculate the content of the Realm's esr_el1 register when
+ * the Synchronous Instruction or Data Abort is injected into
+ * the Realm (EL1).
+ *
+ * The value is constructed from the @esr_el2 & @spsr_el2 that
+ * are captured when the exception from the Realm was taken to EL2.
+ *
+ * The fault status code (ESR_EL1.I/DFSC) is set to @fsc
+ */
+static unsigned long calc_esr_idabort(unsigned long esr_el2,
+				      unsigned long spsr_el2,
+				      unsigned long fsc)
+{
+	/*
+	 * Copy esr_el2 into esr_el1 apart from the following fields:
+	 * - The exception class (EC). Its value depends on whether the
+	 *   exception to EL2 was taken from EL1 or EL0.
+	 * - I/DFSC. It will be set to @fsc.
+	 * - FnV. It will be set to zero.
+	 * - S1PTW. It will be set to zero.
+	 */
+	unsigned long esr_el1 = esr_el2 & ~(ESR_EL2_EC_MASK  |
+					    ESR_EL2_ABORT_FSC_MASK |
+					    ESR_EL2_ABORT_FNV_BIT |
+					    ESR_EL2_ABORT_S1PTW_BIT);
+
+	unsigned long ec = esr_el2 & ESR_EL2_EC_MASK;
+
+	assert((ec == ESR_EL2_EC_INST_ABORT) || (ec == ESR_EL2_EC_DATA_ABORT));
+	if ((spsr_el2 & MASK(SPSR_EL2_MODE)) != SPSR_EL2_MODE_EL0t) {
+		ec += 1UL << ESR_EL2_EC_SHIFT;
+	}
+	esr_el1 |= ec;
+
+	/*
+	 * Set the I/DFSC.
+	 */
+	assert((fsc & ~ESR_EL2_ABORT_FSC_MASK) == 0UL);
+	esr_el1 |= fsc;
+
+	/*
+	 * Set the EA.
+	 */
+	esr_el1 |= ESR_EL2_ABORT_EA_BIT;
+
+	return esr_el1;
+}
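+
+/*
+ * For example, a Data Abort taken to EL2 from the Realm's EL0 keeps
+ * EC == 0x24 (Data Abort from a lower Exception level), while one taken
+ * from the Realm's EL1 is reported to the Realm with EC == 0x25 (Data
+ * Abort without a change in Exception level), which is what the
+ * "ec += 1" above implements.
+ */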
+
+/*
+ * Inject the Synchronous Instruction or Data Abort into the current REC.
+ * The I/DFSC field in the ESR_EL1 is set to @fsc
+ */
+void inject_sync_idabort(unsigned long fsc)
+{
+	unsigned long esr_el2 = read_esr_el2();
+	unsigned long far_el2 = read_far_el2();
+	unsigned long elr_el2 = read_elr_el2();
+	unsigned long spsr_el2 = read_spsr_el2();
+	/* read_vbar_el12() accesses the Realm's VBAR_EL1 via the EL12 alias */
+	unsigned long vbar_el1 = read_vbar_el12();
+
+	unsigned long esr_el1 = calc_esr_idabort(esr_el2, spsr_el2, fsc);
+	unsigned long pc = calc_vector_entry(vbar_el1, spsr_el2);
+	unsigned long pstate = calc_pstate();
+
+	write_far_el12(far_el2);
+	write_elr_el12(elr_el2);
+	write_spsr_el12(spsr_el2);
+	write_esr_el12(esr_el1);
+	write_elr_el2(pc);
+	write_spsr_el2(pstate);
+}
+
+/*
+ * Inject the Synchronous Instruction or Data Abort into @rec.
+ * The I/DFSC field in the ESR_EL1 is set to @fsc
+ */
+void inject_sync_idabort_rec(struct rec *rec, unsigned long fsc)
+{
+	rec->sysregs.far_el1 = rec->last_run_info.far;
+	rec->sysregs.elr_el1 = rec->pc;
+	rec->sysregs.spsr_el1 = rec->pstate;
+	rec->sysregs.esr_el1 = calc_esr_idabort(rec->last_run_info.esr,
+						rec->pstate, fsc);
+	rec->pc = calc_vector_entry(rec->sysregs.vbar_el1, rec->pstate);
+	rec->pstate = calc_pstate();
+}
+
+/*
+ * Inject the Undefined Synchronous Exception into the current REC.
+ */
+void realm_inject_undef_abort(void)
+{
+	unsigned long esr = ESR_EL2_IL_MASK | ESR_EL2_EC_UNKNOWN;
+	unsigned long elr = read_elr_el2();
+	unsigned long spsr = read_spsr_el2();
+	unsigned long vbar = read_vbar_el12();
+
+	unsigned long pc = calc_vector_entry(vbar, spsr);
+	unsigned long pstate = calc_pstate();
+
+	write_elr_el12(elr);
+	write_spsr_el12(spsr);
+	write_esr_el12(esr);
+
+	write_elr_el2(pc);
+	write_spsr_el2(pstate);
+}
diff --git a/runtime/core/run.c b/runtime/core/run.c
new file mode 100644
index 0000000..9127072
--- /dev/null
+++ b/runtime/core/run.c
@@ -0,0 +1,357 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_features.h>
+#include <attestation.h>
+#include <buffer.h>
+#include <cpuid.h>
+#include <exit.h>
+#include <fpu_helpers.h>
+#include <rec.h>
+#include <run.h>
+#include <smc-rmi.h>
+#include <sve.h>
+#include <timers.h>
+
+static struct ns_state g_ns_data[MAX_CPUS];
+static uint8_t g_sve_data[MAX_CPUS][sizeof(struct sve_state)]
+		__attribute__((aligned(sizeof(__uint128_t))));
+
+/*
+ * Initialize the aux data and any buffer pointers to the aux granule memory for
+ * use by REC when it is entered.
+ */
+static void init_aux_data(struct rec_aux_data *aux_data,
+			  void *rec_aux,
+			  unsigned int num_rec_aux)
+{
+	aux_data->attest_heap_buf = (uint8_t *)rec_aux;
+
+	/* Ensure we have enough aux granules for use by REC */
+	assert(num_rec_aux >= REC_HEAP_PAGES);
+}
+
+/*
+ * The parent REC granule's lock is expected to be acquired
+ * before map_rec_aux() and unmap_rec_aux() are called.
+ */
+static void *map_rec_aux(struct granule *rec_aux_pages[], unsigned long num_aux)
+{
+	void *rec_aux = NULL;
+
+	for (unsigned long i = 0UL; i < num_aux; i++) {
+		void *aux = granule_map(rec_aux_pages[i], SLOT_REC_AUX0 + i);
+
+		if (i == 0UL) {
+			rec_aux = aux;
+		}
+	}
+	return rec_aux;
+}
+
+static void unmap_rec_aux(void *rec_aux, unsigned long num_aux)
+{
+	unsigned char *rec_aux_vaddr = (unsigned char *)rec_aux;
+
+	for (unsigned long i = 0UL; i < num_aux; i++) {
+		buffer_unmap(rec_aux_vaddr + i * GRANULE_SIZE);
+	}
+}
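+
+/*
+ * Note that unmap_rec_aux() can unmap by simply offsetting from the first
+ * mapping because the SLOT_REC_AUX0..N slot buffers used by map_rec_aux()
+ * are virtually contiguous.
+ */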
+
+static void save_sysreg_state(struct sysreg_state *sysregs)
+{
+	sysregs->sp_el0 = read_sp_el0();
+	sysregs->sp_el1 = read_sp_el1();
+	sysregs->elr_el1 = read_elr_el12();
+	sysregs->spsr_el1 = read_spsr_el12();
+	sysregs->pmcr_el0 = read_pmcr_el0();
+	sysregs->pmuserenr_el0 = read_pmuserenr_el0();
+	sysregs->tpidrro_el0 = read_tpidrro_el0();
+	sysregs->tpidr_el0 = read_tpidr_el0();
+	sysregs->csselr_el1 = read_csselr_el1();
+	sysregs->sctlr_el1 = read_sctlr_el12();
+	sysregs->actlr_el1 = read_actlr_el1();
+	sysregs->cpacr_el1 = read_cpacr_el12();
+	sysregs->ttbr0_el1 = read_ttbr0_el12();
+	sysregs->ttbr1_el1 = read_ttbr1_el12();
+	sysregs->tcr_el1 = read_tcr_el12();
+	sysregs->esr_el1 = read_esr_el12();
+	sysregs->afsr0_el1 = read_afsr0_el12();
+	sysregs->afsr1_el1 = read_afsr1_el12();
+	sysregs->far_el1 = read_far_el12();
+	sysregs->mair_el1 = read_mair_el12();
+	sysregs->vbar_el1 = read_vbar_el12();
+
+	sysregs->contextidr_el1 = read_contextidr_el12();
+	sysregs->tpidr_el1 = read_tpidr_el1();
+	sysregs->amair_el1 = read_amair_el12();
+	sysregs->cntkctl_el1 = read_cntkctl_el12();
+	sysregs->par_el1 = read_par_el1();
+	sysregs->mdscr_el1 = read_mdscr_el1();
+	sysregs->mdccint_el1 = read_mdccint_el1();
+	sysregs->disr_el1 = read_disr_el1();
+	MPAM(sysregs->mpam0_el1 = read_mpam0_el1();)
+
+	/* Timer registers */
+	sysregs->cntpoff_el2 = read_cntpoff_el2();
+	sysregs->cntvoff_el2 = read_cntvoff_el2();
+	sysregs->cntp_ctl_el0 = read_cntp_ctl_el02();
+	sysregs->cntp_cval_el0 = read_cntp_cval_el02();
+	sysregs->cntv_ctl_el0 = read_cntv_ctl_el02();
+	sysregs->cntv_cval_el0 = read_cntv_cval_el02();
+}
+
+static void save_realm_state(struct rec *rec)
+{
+	save_sysreg_state(&rec->sysregs);
+
+	rec->pc = read_elr_el2();
+	rec->pstate = read_spsr_el2();
+
+	gic_save_state(&rec->sysregs.gicstate);
+}
+
+static void restore_sysreg_state(struct sysreg_state *sysregs)
+{
+	write_sp_el0(sysregs->sp_el0);
+	write_sp_el1(sysregs->sp_el1);
+	write_elr_el12(sysregs->elr_el1);
+	write_spsr_el12(sysregs->spsr_el1);
+	write_pmcr_el0(sysregs->pmcr_el0);
+	write_pmuserenr_el0(sysregs->pmuserenr_el0);
+	write_tpidrro_el0(sysregs->tpidrro_el0);
+	write_tpidr_el0(sysregs->tpidr_el0);
+	write_csselr_el1(sysregs->csselr_el1);
+	write_sctlr_el12(sysregs->sctlr_el1);
+	write_actlr_el1(sysregs->actlr_el1);
+	write_cpacr_el12(sysregs->cpacr_el1);
+	write_ttbr0_el12(sysregs->ttbr0_el1);
+	write_ttbr1_el12(sysregs->ttbr1_el1);
+	write_tcr_el12(sysregs->tcr_el1);
+	write_esr_el12(sysregs->esr_el1);
+	write_afsr0_el12(sysregs->afsr0_el1);
+	write_afsr1_el12(sysregs->afsr1_el1);
+	write_far_el12(sysregs->far_el1);
+	write_mair_el12(sysregs->mair_el1);
+	write_vbar_el12(sysregs->vbar_el1);
+
+	write_contextidr_el12(sysregs->contextidr_el1);
+	write_tpidr_el1(sysregs->tpidr_el1);
+	write_amair_el12(sysregs->amair_el1);
+	write_cntkctl_el12(sysregs->cntkctl_el1);
+	write_par_el1(sysregs->par_el1);
+	write_mdscr_el1(sysregs->mdscr_el1);
+	write_mdccint_el1(sysregs->mdccint_el1);
+	write_disr_el1(sysregs->disr_el1);
+	MPAM(write_mpam0_el1(sysregs->mpam0_el1);)
+	write_vmpidr_el2(sysregs->vmpidr_el2);
+
+	/* Timer registers */
+	write_cntpoff_el2(sysregs->cntpoff_el2);
+	write_cntvoff_el2(sysregs->cntvoff_el2);
+
+	/*
+	 * Restore CNTx_CVAL registers before CNTx_CTL to avoid
+	 * raising the interrupt signal briefly before lowering
+	 * it again due to some expired CVAL left in the timer
+	 * register.
+	 */
+	write_cntp_cval_el02(sysregs->cntp_cval_el0);
+	write_cntp_ctl_el02(sysregs->cntp_ctl_el0);
+	write_cntv_cval_el02(sysregs->cntv_cval_el0);
+	write_cntv_ctl_el02(sysregs->cntv_ctl_el0);
+}
+
+static void restore_realm_state(struct rec *rec)
+{
+	/*
+	 * Restore this early to give the timer mask time to propagate to
+	 * the GIC. Issue an ISB to ensure the register write is actually
+	 * performed before doing the remaining work.
+	 */
+	write_cnthctl_el2(rec->sysregs.cnthctl_el2);
+	isb();
+
+	restore_sysreg_state(&rec->sysregs);
+	write_elr_el2(rec->pc);
+	write_spsr_el2(rec->pstate);
+	write_hcr_el2(rec->sysregs.hcr_el2);
+
+	gic_restore_state(&rec->sysregs.gicstate);
+}
+
+static void configure_realm_stage2(struct rec *rec)
+{
+	write_vtcr_el2(rec->common_sysregs.vtcr_el2);
+	write_vttbr_el2(rec->common_sysregs.vttbr_el2);
+}
+
+static void save_ns_state(struct ns_state *ns_state)
+{
+	save_sysreg_state(&ns_state->sysregs);
+
+	/*
+	 * CNTHCTL_EL2 is saved/restored separately from the main system
+	 * registers, because the Realm configuration is written on every
+	 * entry to the Realm, see `check_pending_timers`.
+	 */
+	ns_state->sysregs.cnthctl_el2 = read_cnthctl_el2();
+
+	ns_state->icc_sre_el2 = read_icc_sre_el2();
+}
+
+static void restore_ns_state(struct ns_state *ns_state)
+{
+	restore_sysreg_state(&ns_state->sysregs);
+
+	/*
+	 * CNTHCTL_EL2 is saved/restored separately from the main system
+	 * registers, because the Realm configuration is written on every
+	 * entry to the Realm, see `check_pending_timers`.
+	 */
+	write_cnthctl_el2(ns_state->sysregs.cnthctl_el2);
+
+	write_icc_sre_el2(ns_state->icc_sre_el2);
+}
+
+static void activate_events(struct rec *rec)
+{
+	/*
+	 * The only event that may be activated at the Realm is the SError.
+	 */
+	if (rec->serror_info.inject) {
+		write_vsesr_el2(rec->serror_info.vsesr_el2);
+		write_hcr_el2(rec->sysregs.hcr_el2 | HCR_VSE);
+		rec->serror_info.inject = false;
+	}
+}
+
+void inject_serror(struct rec *rec, unsigned long vsesr)
+{
+	rec->serror_info.vsesr_el2 = vsesr;
+	rec->serror_info.inject = true;
+}
+
+void rec_run_loop(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
+	struct ns_state *ns_state;
+	int realm_exception_code;
+	void *rec_aux;
+	unsigned int cpuid = my_cpuid();
+
+	assert(rec->ns == NULL);
+
+	assert(cpuid < MAX_CPUS);
+	ns_state = &g_ns_data[cpuid];
+
+	/* ensure SVE/FPU context is cleared */
+	assert(ns_state->sve == NULL);
+	assert(ns_state->fpu == NULL);
+
+	/* Map auxiliary granules */
+	rec_aux = map_rec_aux(rec->g_aux, rec->num_rec_aux);
+
+	init_aux_data(&(rec->aux_data), rec_aux, rec->num_rec_aux);
+
+	/*
+	 * The attestation heap on the REC aux pages is mapped now. It is
+	 * time to associate it with the current CPU.
+	 * This heap will be used for attestation RSI calls when the
+	 * REC is running.
+	 */
+	attestation_heap_ctx_assign_pe(&rec->alloc_info.ctx);
+
+	/*
+	 * Initialise the heap for attestation if necessary.
+	 */
+	if (!rec->alloc_info.ctx_initialised) {
+		(void)attestation_heap_ctx_init(rec->aux_data.attest_heap_buf,
+						REC_HEAP_PAGES * SZ_4K);
+		rec->alloc_info.ctx_initialised = true;
+	}
+
+	if (is_feat_sve_present()) {
+		ns_state->sve = (struct sve_state *)&g_sve_data[cpuid];
+	} else {
+		ns_state->fpu = (struct fpu_state *)&g_sve_data[cpuid];
+	}
+
+	save_ns_state(ns_state);
+	restore_realm_state(rec);
+
+	/* Prepare for lazy save/restore of FPU/SIMD registers. */
+	rec->ns = ns_state;
+	assert(rec->fpu_ctx.used == false);
+
+	configure_realm_stage2(rec);
+
+	do {
+		/*
+		 * We must check the status of the arch timers in every
+		 * iteration of the loop to ensure we update the timer
+		 * mask on each entry to the realm and that we report any
+		 * change in output level to the NS caller.
+		 */
+		if (check_pending_timers(rec)) {
+			rec_exit->exit_reason = RMI_EXIT_IRQ;
+			break;
+		}
+
+		activate_events(rec);
+		realm_exception_code = run_realm(&rec->regs[0]);
+	} while (handle_realm_exit(rec, rec_exit, realm_exception_code));
+
+	/*
+	 * Check if FPU/SIMD was used, and if it was, save the realm state,
+	 * restore the NS state, and reenable traps in CPTR_EL2.
+	 */
+	if (rec->fpu_ctx.used) {
+		unsigned long cptr;
+
+		cptr = read_cptr_el2();
+		cptr &= ~(CPTR_EL2_ZEN_MASK << CPTR_EL2_ZEN_SHIFT);
+		cptr |= (CPTR_EL2_ZEN_NO_TRAP_11 << CPTR_EL2_ZEN_SHIFT);
+		write_cptr_el2(cptr);
+
+		fpu_save_state(&rec->fpu_ctx.fpu);
+		if (ns_state->sve != NULL) {
+			restore_sve_state(ns_state->sve);
+		} else {
+			assert(ns_state->fpu != NULL);
+			fpu_restore_state(ns_state->fpu);
+		}
+
+		cptr = read_cptr_el2();
+		cptr &= ~(CPTR_EL2_FPEN_MASK << CPTR_EL2_FPEN_SHIFT);
+		cptr |= (CPTR_EL2_FPEN_TRAP_ALL_00 << CPTR_EL2_FPEN_SHIFT);
+		cptr &= ~(CPTR_EL2_ZEN_MASK << CPTR_EL2_ZEN_SHIFT);
+		cptr |= (CPTR_EL2_ZEN_TRAP_ALL_00 << CPTR_EL2_ZEN_SHIFT);
+		write_cptr_el2(cptr);
+		rec->fpu_ctx.used = false;
+	}
+
+	/*
+	 * Clear FPU/SVE context while exiting
+	 */
+	ns_state->sve = NULL;
+	ns_state->fpu = NULL;
+
+	/*
+	 * Clear NS pointer since that struct is local to this function.
+	 */
+	rec->ns = NULL;
+
+	report_timer_state_to_ns(rec_exit);
+
+	save_realm_state(rec);
+	restore_ns_state(ns_state);
+
+	/* Undo the heap association */
+	attestation_heap_ctx_unassign_pe(&rec->alloc_info.ctx);
+	/* Unmap auxiliary granules */
+	unmap_rec_aux(rec_aux, rec->num_rec_aux);
+}
diff --git a/runtime/core/sysregs.c b/runtime/core/sysregs.c
new file mode 100644
index 0000000..55fac96
--- /dev/null
+++ b/runtime/core/sysregs.c
@@ -0,0 +1,222 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <esr.h>
+#include <memory_alloc.h>
+#include <rec.h>
+#include <smc-rmi.h>
+
+#define SYSREG_READ_CASE(reg) \
+	case ESR_EL2_SYSREG_##reg: return read_##reg()
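+
+/*
+ * For example, SYSREG_READ_CASE(ID_AA64PFR0_EL1) expands to:
+ *	case ESR_EL2_SYSREG_ID_AA64PFR0_EL1: return read_ID_AA64PFR0_EL1()
+ */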
+
+static unsigned long read_idreg(unsigned int idreg)
+{
+	switch (idreg) {
+	SYSREG_READ_CASE(ID_AA64PFR0_EL1);
+	SYSREG_READ_CASE(ID_AA64PFR1_EL1);
+	/*
+	 * TODO: not supported without SVE:
+	 * SYSREG_READ_CASE(ID_AA64ZFR0_EL1);
+	 */
+	SYSREG_READ_CASE(ID_AA64DFR0_EL1);
+	SYSREG_READ_CASE(ID_AA64DFR1_EL1);
+	SYSREG_READ_CASE(ID_AA64AFR0_EL1);
+	SYSREG_READ_CASE(ID_AA64AFR1_EL1);
+	SYSREG_READ_CASE(ID_AA64ISAR0_EL1);
+	SYSREG_READ_CASE(ID_AA64ISAR1_EL1);
+	SYSREG_READ_CASE(ID_AA64MMFR0_EL1);
+	SYSREG_READ_CASE(ID_AA64MMFR1_EL1);
+	SYSREG_READ_CASE(ID_AA64MMFR2_EL1);
+
+	default:
+		/* All other encodings are in the RES0 space */
+		return 0UL;
+	}
+}
+
+/*
+ * Handle ID_AA64XXX<n>_EL1 instructions
+ */
+static bool handle_id_sysreg_trap(struct rec *rec,
+				  struct rmi_rec_exit *rec_exit,
+				  unsigned long esr)
+{
+	unsigned int rt;
+	unsigned long idreg, mask;
+
+	/*
+	 * We only set HCR_EL2.TID3 to trap ID registers at the moment and
+	 * that only traps reads of registers. Seeing a write here indicates a
+	 * consistency problem with the RMM and we should panic immediately.
+	 */
+	assert(!ESR_EL2_SYSREG_IS_WRITE(esr));
+
+	/*
+	 * Read Rt value from the issued instruction,
+	 * the general-purpose register used for the transfer.
+	 */
+	rt = ESR_EL2_SYSREG_ISS_RT(esr);
+
+	/* Handle writes to XZR register */
+	if (rt == 31U) {
+		return true;
+	}
+
+	idreg = esr & ESR_EL2_SYSREG_MASK;
+
+	if (idreg == ESR_EL2_SYSREG_ID_AA64ISAR1_EL1) {
+		/* Clear Address and Generic Authentication bits */
+		mask = (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_APA_SHIFT) |
+		       (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_API_SHIFT) |
+		       (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_GPA_SHIFT) |
+		       (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_GPI_SHIFT);
+	/*
+	 * Workaround for TF-A trapping AMU registers access
+	 * to EL3 in Realm state
+	 */
+	} else if (idreg == ESR_EL2_SYSREG_ID_AA64PFR0_EL1) {
+		/* Clear support for Activity Monitors Extension */
+		mask = MASK(ID_AA64PFR0_EL1_AMU);
+
+		/*
+		 * Clear support for SVE. This is a temporary fix until RMM
+		 * completely supports SVE.
+		 */
+		mask |= MASK(ID_AA64PFR0_EL1_SVE);
+	} else {
+		mask = 0UL;
+	}
+
+	ARRAY_WRITE(rec->regs, rt, read_idreg(idreg) & ~mask);
+
+	return true;
+}
+
+static bool handle_icc_el1_sysreg_trap(struct rec *rec,
+				       struct rmi_rec_exit *rec_exit,
+				       unsigned long esr)
+{
+	__unused unsigned long sysreg = esr & ESR_EL2_SYSREG_MASK;
+
+	/*
+	 * We should only have configured ICH_HCR_EL2 to trap on DIR and we
+	 * always trap on the SGIRs following the architecture, so make sure
+	 * we're not accidentally trapping on some other register here.
+	 */
+	assert((sysreg == ESR_EL2_SYSREG_ICC_DIR) ||
+	       (sysreg == ESR_EL2_SYSREG_ICC_SGI1R_EL1) ||
+	       (sysreg == ESR_EL2_SYSREG_ICC_SGI0R_EL1));
+
+	/*
+	 * The registers above should only trap to EL2 for writes, read
+	 * instructions are not defined and should cause an Undefined exception
+	 * at EL1.
+	 */
+	assert(ESR_EL2_SYSREG_IS_WRITE(esr));
+
+	rec_exit->exit_reason = RMI_EXIT_SYNC;
+	rec_exit->esr = esr;
+	return false;
+}
+
+typedef bool (*sysreg_handler_fn)(struct rec *rec, struct rmi_rec_exit *rec_exit,
+				  unsigned long esr);
+
+struct sysreg_handler {
+	unsigned long esr_mask;
+	unsigned long esr_value;
+	sysreg_handler_fn fn;
+};
+
+#define SYSREG_HANDLER(_mask, _value, _handler_fn) \
+	{ .esr_mask = (_mask), .esr_value = (_value), .fn = _handler_fn }
+
+static const struct sysreg_handler sysreg_handlers[] = {
+	SYSREG_HANDLER(ESR_EL2_SYSREG_ID_MASK, ESR_EL2_SYSREG_ID, handle_id_sysreg_trap),
+	SYSREG_HANDLER(ESR_EL2_SYSREG_ICC_EL1_MASK, ESR_EL2_SYSREG_ICC_EL1, handle_icc_el1_sysreg_trap),
+	SYSREG_HANDLER(ESR_EL2_SYSREG_MASK, ESR_EL2_SYSREG_ICC_PMR_EL1, handle_icc_el1_sysreg_trap)
+};
+
+static unsigned long get_sysreg_write_value(struct rec *rec, unsigned long esr)
+{
+	unsigned int rt = esr_sysreg_rt(esr);
+	unsigned long val;
+
+	/* Handle reads from XZR register */
+	if (rt == 31U) {
+		return 0UL;
+	}
+
+	ARRAY_READ(rec->regs, rt, val);
+	return val;
+}
+
+static void emulate_sysreg_access_ns(struct rec *rec, struct rmi_rec_exit *rec_exit,
+				     unsigned long esr)
+{
+	if (ESR_EL2_SYSREG_IS_WRITE(esr)) {
+		rec_exit->gprs[0] = get_sysreg_write_value(rec, esr);
+	}
+}
+
+/*
+ * Handle trapped MSR, MRS or System instruction execution
+ * in AArch64 state
+ */
+bool handle_sysreg_access_trap(struct rec *rec, struct rmi_rec_exit *rec_exit,
+			       unsigned long esr)
+{
+	/*
+	 * Read Rt value from the issued instruction,
+	 * the general-purpose register used for the transfer.
+	 */
+	unsigned int rt = ESR_EL2_SYSREG_ISS_RT(esr);
+	unsigned int i;
+	unsigned int __unused op0, op1, crn, crm, op2;
+	unsigned long __unused sysreg;
+
+	/* Check for 32-bit instruction trapped */
+	assert(ESR_IL(esr) != 0UL);
+
+	for (i = 0U; i < ARRAY_LEN(sysreg_handlers); i++) {
+		const struct sysreg_handler *handler = &sysreg_handlers[i];
+		bool handled;
+
+		if ((esr & handler->esr_mask) == handler->esr_value) {
+			handled = handler->fn(rec, rec_exit, esr);
+			if (!handled) {
+				emulate_sysreg_access_ns(rec, rec_exit, esr);
+			}
+			return handled;
+		}
+	}
+
+	/*
+	 * For now, treat all unhandled accesses as RAZ/WI.
+	 * Handle writes to XZR register.
+	 */
+	if (!ESR_EL2_SYSREG_IS_WRITE(esr) && (rt != 31U)) {
+		ARRAY_WRITE(rec->regs, rt, 0UL);
+	}
+
+	sysreg = esr & ESR_EL2_SYSREG_MASK;
+
+	/* Extract system register encoding */
+	op0 = EXTRACT(ESR_EL2_SYSREG_TRAP_OP0, sysreg);
+	op1 = EXTRACT(ESR_EL2_SYSREG_TRAP_OP1, sysreg);
+	crn = EXTRACT(ESR_EL2_SYSREG_TRAP_CRN, sysreg);
+	crm = EXTRACT(ESR_EL2_SYSREG_TRAP_CRM, sysreg);
+	op2 = EXTRACT(ESR_EL2_SYSREG_TRAP_OP2, sysreg);
+
+	INFO("Unhandled %s S%u_%u_C%u_C%u_%u\n",
+		ESR_EL2_SYSREG_IS_WRITE(esr) ? "write" : "read",
+		op0, op1, crn, crm, op2);
+
+	return true;
+}
diff --git a/runtime/core/vmid.c b/runtime/core/vmid.c
new file mode 100644
index 0000000..64c30b3
--- /dev/null
+++ b/runtime/core/vmid.c
@@ -0,0 +1,65 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch_features.h>
+#include <assert.h>
+#include <atomics.h>
+#include <sizes.h>
+#include <spinlock.h>
+#include <vmid.h>
+
+#define VMID8_COUNT		(1U << 8)
+#define VMID16_COUNT		(1U << 16)
+#define MAX_VMID_COUNT		VMID16_COUNT
+#define VMID_ARRAY_LONG_SIZE	(MAX_VMID_COUNT / BITS_PER_UL)
+
+/*
+ * The bitmap for the reserved/used VMID values.
+ */
+static unsigned long vmids[VMID_ARRAY_LONG_SIZE];
+
+/*
+ * Marks the VMID value to be in use. It returns:
+ * - True, on success
+ * - False, if the vmid is out of range,
+ *   or if it was already reserved (in use).
+ */
+bool vmid_reserve(unsigned int vmid)
+{
+	unsigned int offset;
+	unsigned int vmid_count;
+
+	/* Number of supported VMID values */
+	vmid_count = is_feat_vmid16_present() ? VMID16_COUNT : VMID8_COUNT;
+	/*
+	 * The input from NS as part of RMI_REALM_CREATE is 'short int' type,
+	 * so this check will not fail on systems with FEAT_VMID16 implemented.
+	 */
+	if (vmid >= vmid_count) {
+		return false;
+	}
+
+	offset = vmid / BITS_PER_UL;
+	/* Reduce to the bit position within the bitmap word */
+	vmid %= BITS_PER_UL;
+
+	return !atomic_bit_set_acquire_release_64(&vmids[offset], vmid);
+}
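+
+/*
+ * For example, with 64-bit bitmap words (BITS_PER_UL == 64), VMID 130
+ * maps to bit (130 % 64) == 2 of vmids[130 / 64] == vmids[2].
+ */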
+
+/*
+ * Marks the VMID value to be not in use.
+ */
+void vmid_free(unsigned int vmid)
+{
+	unsigned int offset;
+	unsigned int __unused vmid_count;
+
+	/* Number of supported VMID values */
+	vmid_count = is_feat_vmid16_present() ? VMID16_COUNT : VMID8_COUNT;
+
+	/* Check the number of supported VMID values */
+	assert(vmid < vmid_count);
+	offset = vmid / BITS_PER_UL;
+	/* Reduce to the bit position within the bitmap word */
+	vmid %= BITS_PER_UL;
+
+	atomic_bit_clear_release_64(&vmids[offset], vmid);
+}
diff --git a/runtime/include/exit.h b/runtime/include/exit.h
new file mode 100644
index 0000000..10fa258
--- /dev/null
+++ b/runtime/include/exit.h
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef EXIT_H
+#define EXIT_H
+
+#include <stdbool.h>
+
+struct rec;
+struct rmi_rec_exit;
+
+bool handle_realm_exit(struct rec *rec, struct rmi_rec_exit *rec_exit, int exception);
+
+#endif /* EXIT_H */
diff --git a/runtime/include/feature.h b/runtime/include/feature.h
new file mode 100644
index 0000000..b390813
--- /dev/null
+++ b/runtime/include/feature.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef FEATURE_H
+#define FEATURE_H
+
+#include <arch.h>
+
+#define	RMM_FEATURE_MIN_IPA_SIZE		PARANGE_0000_WIDTH
+
+#define RMM_FEATURE_REGISTER_0_INDEX		UL(0)
+
+#define RMM_FEATURE_REGISTER_0_S2SZ_SHIFT	UL(0)
+#define RMM_FEATURE_REGISTER_0_S2SZ_WIDTH	UL(8)
+
+#define RMM_FEATURE_REGISTER_0_LPA2_SHIFT	UL(8)
+#define RMM_FEATURE_REGISTER_0_LPA2_WIDTH	UL(1)
+
+#define	RMI_NO_LPA2				UL(0)
+#define	RMI_LPA2				UL(1)
+
+#define RMM_FEATURE_REGISTER_0_HASH_SHA_256_SHIFT	UL(28)
+#define RMM_FEATURE_REGISTER_0_HASH_SHA_256_WIDTH	UL(1)
+
+#define RMM_FEATURE_REGISTER_0_HASH_SHA_512_SHIFT	UL(29)
+#define RMM_FEATURE_REGISTER_0_HASH_SHA_512_WIDTH	UL(1)
+
+bool validate_feature_register(unsigned long index, unsigned long value);
+
+#endif /* FEATURE_H */
diff --git a/runtime/include/inject_exp.h b/runtime/include/inject_exp.h
new file mode 100644
index 0000000..c899576
--- /dev/null
+++ b/runtime/include/inject_exp.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef INJECT_EXP_H
+#define INJECT_EXP_H
+
+struct rec;
+
+void inject_sync_idabort(unsigned long fsc);
+void inject_sync_idabort_rec(struct rec *rec, unsigned long fsc);
+void realm_inject_undef_abort(void);
+
+#endif /* INJECT_EXP_H */
diff --git a/runtime/include/psci.h b/runtime/include/psci.h
new file mode 100644
index 0000000..e11b5e8
--- /dev/null
+++ b/runtime/include/psci.h
@@ -0,0 +1,119 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef PSCI_H
+#define PSCI_H
+
+#include <smc.h>
+#include <status.h>
+#include <stdbool.h>
+
+#define SMC32_PSCI_FID(_offset)		SMC32_STD_FID(PSCI, _offset)
+#define SMC64_PSCI_FID(_offset)		SMC64_STD_FID(PSCI, _offset)
+
+#define IS_SMC32_PSCI_FID(_fid)		IS_SMC32_STD_FAST_IN_RANGE(PSCI, _fid)
+#define IS_SMC64_PSCI_FID(_fid)		IS_SMC64_STD_FAST_IN_RANGE(PSCI, _fid)
+
+#define SMC32_PSCI_FID_MIN		SMC32_PSCI_FID(SMC32_PSCI_FNUM_MIN)
+#define SMC32_PSCI_FID_MAX		SMC32_PSCI_FID(SMC32_PSCI_FNUM_MAX)
+
+#define SMC64_PSCI_FID_MIN		SMC64_PSCI_FID(SMC64_PSCI_FNUM_MIN)
+#define SMC64_PSCI_FID_MAX		SMC64_PSCI_FID(SMC64_PSCI_FNUM_MAX)
+
+#define SMC32_PSCI_VERSION		SMC32_PSCI_FID(0x0U)
+
+#define SMC32_PSCI_CPU_SUSPEND		SMC32_PSCI_FID(0x1U)
+#define SMC64_PSCI_CPU_SUSPEND		SMC64_PSCI_FID(0x1U)
+
+#define SMC32_PSCI_CPU_OFF		SMC32_PSCI_FID(0x2U)
+
+#define SMC32_PSCI_CPU_ON		SMC32_PSCI_FID(0x3U)
+#define SMC64_PSCI_CPU_ON		SMC64_PSCI_FID(0x3U)
+
+#define SMC32_PSCI_AFFINITY_INFO	SMC32_PSCI_FID(0x4U)
+#define SMC64_PSCI_AFFINITY_INFO	SMC64_PSCI_FID(0x4U)
+
+#define SMC32_PSCI_MIGRATE		SMC32_PSCI_FID(0x5U)
+#define SMC64_PSCI_MIGRATE		SMC64_PSCI_FID(0x5U)
+
+#define SMC32_PSCI_MIGRATE_INFO_TYPE	SMC32_PSCI_FID(0x6U)
+
+#define SMC32_PSCI_MIGRATE_INFO_UP_CPU	SMC32_PSCI_FID(0x7U)
+#define SMC64_PSCI_MIGRATE_INFO_UP_CPU	SMC64_PSCI_FID(0x7U)
+
+#define SMC32_PSCI_SYSTEM_OFF		SMC32_PSCI_FID(0x8U)
+
+#define SMC32_PSCI_SYSTEM_RESET		SMC32_PSCI_FID(0x9U)
+
+#define SMC32_PSCI_FEATURES		SMC32_PSCI_FID(0xAU)
+
+#define SMC32_PSCI_CPU_FREEZE		SMC32_PSCI_FID(0xBU)
+
+#define SMC32_PSCI_CPU_DEFAULT_SUSPEND	SMC32_PSCI_FID(0xCU)
+#define SMC64_PSCI_CPU_DEFAULT_SUSPEND	SMC64_PSCI_FID(0xCU)
+
+#define SMC32_PSCI_NODE_HW_STATE	SMC32_PSCI_FID(0xDU)
+#define SMC64_PSCI_NODE_HW_STATE	SMC64_PSCI_FID(0xDU)
+
+#define SMC32_PSCI_SYSTEM_SUSPEND	SMC32_PSCI_FID(0xEU)
+#define SMC64_PSCI_SYSTEM_SUSPEND	SMC64_PSCI_FID(0xEU)
+
+#define SMC32_PSCI_SET_SUSPEND_MODE	SMC32_PSCI_FID(0xFU)
+
+#define SMC32_PSCI_STAT_RESIDENCY	SMC32_PSCI_FID(0x10U)
+#define SMC64_PSCI_STAT_RESIDENCY	SMC64_PSCI_FID(0x10U)
+
+#define SMC32_PSCI_STAT_COUNT		SMC32_PSCI_FID(0x11U)
+#define SMC64_PSCI_STAT_COUNT		SMC64_PSCI_FID(0x11U)
+
+#define SMC32_PSCI_SYSTEM_RESET2	SMC32_PSCI_FID(0x12U)
+#define SMC64_PSCI_SYSTEM_RESET2	SMC64_PSCI_FID(0x12U)
+
+#define SMC32_PSCI_MEM_PROTECT		SMC32_PSCI_FID(0x13U)
+
+#define SMC32_PSCI_MEM_PROTECT_CHECK_RANGE	SMC32_PSCI_FID(0x14U)
+#define SMC64_PSCI_MEM_PROTECT_CHECK_RANGE	SMC64_PSCI_FID(0x14U)
+
+#define PSCI_RETURN_SUCCESS		UL(0)
+#define PSCI_RETURN_NOT_SUPPORTED	UL(-1)
+#define PSCI_RETURN_INVALID_PARAMS	UL(-2)
+#define PSCI_RETURN_DENIED		UL(-3)
+#define PSCI_RETURN_ALREADY_ON		UL(-4)
+#define PSCI_RETURN_ON_PENDING		UL(-5)
+#define PSCI_RETURN_INTERNAL_FAILURE	UL(-6)
+#define PSCI_RETURN_NOT_PRESENT		UL(-7)
+#define PSCI_RETURN_DISABLED		UL(-8)
+#define PSCI_RETURN_INVALID_ADDRESS	UL(-9)
+
+#define PSCI_AFFINITY_INFO_ON		UL(0)
+#define PSCI_AFFINITY_INFO_OFF		UL(1)
+#define PSCI_AFFINITY_INFO_ON_PENDING	UL(2)
+
+#define PSCI_NODE_HW_ON			UL(0)
+#define PSCI_NODE_HW_OFF		UL(1)
+#define PSCI_NODE_HW_STANDBY		UL(2)
+
+struct rec;
+
+struct psci_result {
+	struct {
+		bool forward_psci_call;
+		unsigned long x1;
+		unsigned long x2;
+		unsigned long x3;
+	} hvc_forward;
+	struct smc_result smc_res;
+};
+
+struct psci_result psci_rsi(struct rec *rec,
+			    unsigned int function_id,
+			    unsigned long arg0,
+			    unsigned long arg1,
+			    unsigned long arg2);
+
+unsigned long psci_complete_request(struct rec *calling_rec,
+				    struct rec *target_rec);
+
+#endif /* PSCI_H */
diff --git a/runtime/include/realm_attest.h b/runtime/include/realm_attest.h
new file mode 100644
index 0000000..6e2d1b1
--- /dev/null
+++ b/runtime/include/realm_attest.h
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef REALM_ATTEST_H
+#define REALM_ATTEST_H
+
+#include <rsi-walk.h>
+#include <stdbool.h>
+
+struct rec;
+
+struct attest_result {
+	/*
+	 * If true, RMM should repeat the operation.
+	 *
+	 * If false, the contents of the remaining fields are valid.
+	 */
+	bool incomplete;
+
+	/*
+	 * Result of RTT walk performed by RSI command.
+	 */
+	struct rsi_walk_result walk_result;
+
+	/*
+	 * If @incomplete is false and @walk_result.abort is false,
+	 * @smc_res contains GPR values to be returned to the Realm.
+	 */
+	struct smc_result smc_res;
+};
+
+unsigned long handle_rsi_read_measurement(struct rec *rec);
+unsigned long handle_rsi_extend_measurement(struct rec *rec);
+unsigned long handle_rsi_attest_token_init(struct rec *rec);
+void attest_realm_token_sign_continue_start(void);
+void handle_rsi_attest_token_continue(struct rec *rec,
+				      struct attest_result *res);
+void attest_realm_token_sign_continue_finish(void);
+
+#endif /* REALM_ATTEST_H */
diff --git a/runtime/include/rsi-config.h b/runtime/include/rsi-config.h
new file mode 100644
index 0000000..aaa3fe5
--- /dev/null
+++ b/runtime/include/rsi-config.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef RSI_CONFIG_H
+#define RSI_CONFIG_H
+
+#include <rsi-walk.h>
+#include <smc.h>
+
+struct rec;
+
+struct rsi_config_result {
+	/*
+	 * Result of RTT walk performed by RSI command.
+	 */
+	struct rsi_walk_result walk_result;
+
+	/*
+	 * If @walk_result.abort is false, @smc_res contains GPR values to be
+	 * returned to the Realm.
+	 */
+	struct smc_result smc_res;
+};
+
+struct rsi_config_result handle_rsi_realm_config(struct rec *rec);
+
+#endif /* RSI_CONFIG_H */
diff --git a/runtime/include/rsi-handler.h b/runtime/include/rsi-handler.h
new file mode 100644
index 0000000..0151229
--- /dev/null
+++ b/runtime/include/rsi-handler.h
@@ -0,0 +1,11 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef RSI_HANDLER_H
+#define RSI_HANDLER_H
+
+unsigned long system_rsi_abi_version(void);
+
+#endif /* RSI_HANDLER_H */
diff --git a/runtime/include/rsi-host-call.h b/runtime/include/rsi-host-call.h
new file mode 100644
index 0000000..aa58d47
--- /dev/null
+++ b/runtime/include/rsi-host-call.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef RSI_HOST_CALL_H
+#define RSI_HOST_CALL_H
+
+#include <rsi-walk.h>
+#include <smc-rmi.h>
+
+struct rmi_rec_entry;
+struct rmi_rec_exit;
+
+struct rsi_host_call_result {
+	/*
+	 * Result of RTT walk performed by RSI command.
+	 */
+	struct rsi_walk_result walk_result;
+
+	/*
+	 * If @walk_result.abort is false,
+	 * @smc_result contains X0 value to be returned to the Realm.
+	 */
+	unsigned long smc_result;
+};
+
+struct rsi_host_call_result handle_rsi_host_call(struct rec *rec,
+						 struct rmi_rec_exit *rec_exit);
+
+struct rsi_walk_result complete_rsi_host_call(struct rec *rec,
+					      struct rmi_rec_entry *rec_entry);
+
+#endif /* RSI_HOST_CALL_H */
diff --git a/runtime/include/rsi-logger.h b/runtime/include/rsi-logger.h
new file mode 100644
index 0000000..86131d6
--- /dev/null
+++ b/runtime/include/rsi-logger.h
@@ -0,0 +1,41 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef RSI_LOGGER_H
+#define RSI_LOGGER_H
+
+#include <debug.h>
+
+/*
+ * RSI_LOG_LEVEL debug level is set to one of:
+ * LOG_LEVEL_NONE    = 0
+ * LOG_LEVEL_ERROR   = 10
+ * LOG_LEVEL_NOTICE  = 20
+ * LOG_LEVEL_WARNING = 30
+ * LOG_LEVEL_INFO    = 40
+ * LOG_LEVEL_VERBOSE = 50
+ */
+#if (RSI_LOG_LEVEL >= LOG_LEVEL_ERROR) && (RSI_LOG_LEVEL <= LOG_LEVEL)
+
+void rsi_log_on_exit(unsigned int function_id, unsigned long args[5],
+		     unsigned long res, bool exit_to_rec);
+
+/* Store SMC RSI parameters */
+# define RSI_LOG_SET(x0, x1, x2, x3, x4)	\
+	unsigned long rsi_log_args[5] = {x0, x1, x2, x3, x4}
+
+/*
+ * Macro prints RSI call function name, parameters
+ * and result when returning back to REC
+ */
+# define RSI_LOG_EXIT(id, res, ret)	\
+	rsi_log_on_exit(id, rsi_log_args, res, ret)
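+
+/*
+ * Illustrative usage in an RSI handler (names are hypothetical):
+ *	RSI_LOG_SET(regs[0], regs[1], regs[2], regs[3], regs[4]);
+ *	... handle the RSI call and compute 'res' ...
+ *	RSI_LOG_EXIT(function_id, res, true);
+ */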
+
+#else
+# define RSI_LOG_SET(x0, x1, x2, x3, x4)
+# define RSI_LOG_EXIT(id, res, ret)
+
+#endif /* (>= LOG_LEVEL_ERROR) && (<= LOG_LEVEL) */
+#endif /* RSI_LOGGER_H */
diff --git a/runtime/include/rsi-memory.h b/runtime/include/rsi-memory.h
new file mode 100644
index 0000000..e74e15e
--- /dev/null
+++ b/runtime/include/rsi-memory.h
@@ -0,0 +1,19 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef	RSI_MEMORY_H
+#define	RSI_MEMORY_H
+
+#include <smc-rsi.h>
+
+struct rec;
+struct rmi_rec_exit;
+
+bool handle_rsi_ipa_state_set(struct rec *rec, struct rmi_rec_exit *rec_exit);
+
+rsi_status_t handle_rsi_ipa_state_get(struct rec *rec, unsigned long ipa,
+				      enum ripas *ripas);
+
+#endif /* RSI_MEMORY_H */
diff --git a/runtime/include/rsi-walk.h b/runtime/include/rsi-walk.h
new file mode 100644
index 0000000..01ff6c1
--- /dev/null
+++ b/runtime/include/rsi-walk.h
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef RSI_WALK_H
+#define RSI_WALK_H
+
+struct rsi_walk_result {
+	/*
+	 * If true, RTT walk failed due to missing PTE at level @rtt_level.
+	 *
+	 * If false, @smc_result contains GPR values to be returned to the
+	 * Realm.
+	 */
+	bool abort;
+	unsigned long rtt_level;
+};
+
+#endif /* RSI_WALK_H */
diff --git a/runtime/include/run.h b/runtime/include/run.h
new file mode 100644
index 0000000..3c3d8ae
--- /dev/null
+++ b/runtime/include/run.h
@@ -0,0 +1,17 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef RUN_H
+#define RUN_H
+
+/*
+ * Function to enter Realm with `regs` pointing to GP Regs to be
+ * restored/saved when entering/exiting the Realm. This function
+ * returns with the Realm exception code which is populated by
+ * Realm_exit() on aarch64.
+ */
+int run_realm(unsigned long *regs);
+
+#endif /* RUN_H */
diff --git a/runtime/include/smc-handler.h b/runtime/include/smc-handler.h
new file mode 100644
index 0000000..e9e49fc
--- /dev/null
+++ b/runtime/include/smc-handler.h
@@ -0,0 +1,95 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef SMC_HANDLER_H
+#define SMC_HANDLER_H
+
+#include <smc.h>
+
+unsigned long smc_version(void);
+
+void smc_read_feature_register(unsigned long index,
+				struct smc_result *ret_struct);
+
+unsigned long smc_data_create(unsigned long data_addr,
+			      unsigned long rd_addr,
+			      unsigned long map_addr,
+			      unsigned long src_addr,
+			      unsigned long flags);
+
+unsigned long smc_data_create_unknown(unsigned long data_addr,
+				      unsigned long rd_addr,
+				      unsigned long map_addr);
+
+unsigned long smc_data_destroy(unsigned long rd_addr,
+			       unsigned long map_addr);
+
+unsigned long smc_granule_delegate(unsigned long addr);
+
+unsigned long smc_granule_undelegate(unsigned long addr);
+
+unsigned long smc_realm_activate(unsigned long rd_addr);
+
+unsigned long smc_realm_create(unsigned long rd_addr,
+			     unsigned long realm_params_addr);
+
+unsigned long smc_realm_destroy(unsigned long rd_addr);
+
+unsigned long smc_rec_create(unsigned long rec_addr,
+			     unsigned long rd_addr,
+			     unsigned long rec_params_addr);
+
+unsigned long smc_rec_destroy(unsigned long rec_addr);
+
+unsigned long smc_rec_enter(unsigned long rec_addr,
+			    unsigned long rec_run_addr);
+
+void smc_rec_aux_count(unsigned long rd_addr,
+			struct smc_result *ret_struct);
+
+unsigned long smc_rtt_create(unsigned long rtt_addr,
+			     unsigned long rd_addr,
+			     unsigned long map_addr,
+			     unsigned long ulevel);
+
+unsigned long smc_rtt_destroy(unsigned long rtt_addr,
+			      unsigned long rd_addr,
+			      unsigned long map_addr,
+			      unsigned long ulevel);
+
+unsigned long smc_rtt_fold(unsigned long rtt_addr,
+			   unsigned long rd_addr,
+			   unsigned long map_addr,
+			   unsigned long ulevel);
+
+unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
+				      unsigned long map_addr,
+				      unsigned long ulevel,
+				      unsigned long s2tte);
+
+unsigned long smc_rtt_unmap_unprotected(unsigned long rd_addr,
+					unsigned long map_addr,
+					unsigned long ulevel);
+
+void smc_rtt_read_entry(unsigned long rd_addr,
+			unsigned long map_addr,
+			unsigned long ulevel,
+			struct smc_result *ret_struct);
+
+unsigned long smc_psci_complete(unsigned long calling_rec_addr,
+				unsigned long target_rec_addr);
+
+unsigned long smc_rtt_init_ripas(unsigned long rd_addr,
+				 unsigned long map_addr,
+				 unsigned long ulevel);
+
+unsigned long smc_rtt_set_ripas(unsigned long rd_addr,
+				unsigned long rec_addr,
+				unsigned long map_addr,
+				unsigned long ulevel,
+				unsigned long uripas);
+
+#endif /* SMC_HANDLER_H */
diff --git a/runtime/include/sysreg_traps.h b/runtime/include/sysreg_traps.h
new file mode 100644
index 0000000..2fc0a84
--- /dev/null
+++ b/runtime/include/sysreg_traps.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#ifndef SYSREG_TRAPS_H
+#define SYSREG_TRAPS_H
+
+#include <stdbool.h>
+
+struct rec;
+struct rmi_rec_exit;
+
+bool handle_sysreg_access_trap(struct rec *rec, struct rmi_rec_exit *rec_exit,
+			       unsigned long esr);
+
+#endif /* SYSREG_TRAPS_H */
diff --git a/runtime/linker.lds b/runtime/linker.lds
new file mode 100644
index 0000000..0928fae
--- /dev/null
+++ b/runtime/linker.lds
@@ -0,0 +1,106 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <sizes.h>
+
+ENTRY(rmm_entry)
+
+MEMORY {
+	RAM (rwx): ORIGIN = 0x0, LENGTH = RMM_MAX_SIZE
+}
+
+SECTIONS
+{
+	rmm_base = .;
+
+	.text . : {
+		rmm_text_start = .;
+		*head.S.obj(.text*)
+		. = ALIGN(8);
+		*(.text*)
+		. = ALIGN(GRANULE_SIZE);
+	} >RAM
+
+	rmm_text_end = .;
+
+	ASSERT(rmm_text_end == ALIGN(GRANULE_SIZE), "rmm_text_end is not page aligned")
+
+	.rodata ALIGN(GRANULE_SIZE) : {
+		rmm_ro_start = .;
+		*(.rodata*)
+		. = ALIGN(8);
+		rmm_got_start = .;
+		*(.got)
+		rmm_got_end = .;
+	} >RAM
+
+	/*
+	 * The xlat_static_table section is for full, aligned page tables.
+	 * The static tables must not change once the MMU is enabled, so
+	 * allocate them on the RO area to keep them protected from writing.
+	 *
+	 * The memory will be cleared by the xlat library during start up.
+	 */
+	xlat_table ALIGN(GRANULE_SIZE) : {
+		*(xlat_static_tables)
+	} >RAM
+
+	rmm_ro_end = .;
+
+	ASSERT(rmm_ro_end == ALIGN(GRANULE_SIZE), "rmm_ro_end is not page aligned")
+
+	/* Align rw data to the next 2MB block */
+	.data ALIGN(SZ_2M) : {
+		rmm_rw_start = .;
+		*(.data*)
+	} >RAM
+
+	/*
+	 * .rela.dyn needs to come after .data for the read-elf utility to
+	 * parse this section correctly.
+	 */
+	.rela.dyn ALIGN(8) : {
+		rmm_rela_start = .;
+		*(.rela*)
+		rmm_rela_end = .;
+	} >RAM
+
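+	/*
+	 * Per-CPU stacks. With the default RMM_NUM_PAGES_PER_STACK of 3 and
+	 * 4KB granules, this reserves a 12KB stack for each of the MAX_CPUS
+	 * CPUs.
+	 */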
+	.percpu ALIGN(GRANULE_SIZE) (NOLOAD) : {
+		stack_start = .;
+		. = . + (RMM_NUM_PAGES_PER_STACK * GRANULE_SIZE * MAX_CPUS);
+		stack_end = .;
+	} >RAM
+
+	.bss ALIGN(16) (NOLOAD) : {
+		bss_start = .;
+		*(.bss*)
+		bss_end = .;
+	} >RAM
+
+	/*
+	 * The slot_buffer_xlat_tbl section is for full, aligned page tables.
+	 * The dynamic tables are used for transient memory areas that can
+	 * change at any time, so the tables must have RW access.
+	 *
+	 * The tables will be erased by the xlat library during start up.
+	 */
+	slot_buffer_xlat_tbl ALIGN(GRANULE_SIZE) (NOLOAD) : {
+		*(slot_buffer_xlat_tbls)
+	} >RAM
+
+	rmm_rw_end = .;
+	rmm_end = rmm_rw_end;
+
+	ASSERT(rmm_rw_end == ALIGN(GRANULE_SIZE), "rmm_rw_end is not page aligned")
+
+	/DISCARD/ : { *(.dynstr*) }
+	/DISCARD/ : { *(.dynsym*) }
+	/DISCARD/ : { *(.dynamic*) }
+	/DISCARD/ : { *(.hash*) }
+	/DISCARD/ : { *(.plt*) }
+	/DISCARD/ : { *(.interp*) }
+	/DISCARD/ : { *(.gnu*) }
+	/DISCARD/ : { *(.note*) }
+}
diff --git a/runtime/rmi/feature.c b/runtime/rmi/feature.c
new file mode 100644
index 0000000..fa52ba8
--- /dev/null
+++ b/runtime/rmi/feature.c
@@ -0,0 +1,73 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch_features.h>
+#include <assert.h>
+#include <feature.h>
+#include <smc-handler.h>
+#include <smc-rmi.h>
+#include <status.h>
+
+static unsigned long get_feature_register_0(void)
+{
+	/* Set S2SZ field */
+	unsigned long s2sz = arch_feat_get_pa_width();
+	unsigned long feat_reg0 = INPLACE(RMM_FEATURE_REGISTER_0_S2SZ, s2sz);
+
+	/* Set LPA2 field */
+	if (is_feat_lpa2_4k_present()) {
+		feat_reg0 |= INPLACE(RMM_FEATURE_REGISTER_0_LPA2, RMI_LPA2);
+	}
+
+	/* Set support for SHA256 and SHA512 hash algorithms */
+	feat_reg0 |= INPLACE(RMM_FEATURE_REGISTER_0_HASH_SHA_256, 1);
+	feat_reg0 |= INPLACE(RMM_FEATURE_REGISTER_0_HASH_SHA_512, 1);
+
+	return feat_reg0;
+}
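+
+/*
+ * For example, on a hypothetical platform with a 48-bit PA range and no
+ * FEAT_LPA2, get_feature_register_0() returns 0x30000030:
+ * S2SZ == 48 (0x30) with the SHA-256 and SHA-512 bits set.
+ */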
+
+void smc_read_feature_register(unsigned long index,
+				struct smc_result *ret_struct)
+{
+	switch (index) {
+	case RMM_FEATURE_REGISTER_0_INDEX:
+		ret_struct->x[0] = RMI_SUCCESS;
+		ret_struct->x[1] = get_feature_register_0();
+		break;
+	default:
+		ret_struct->x[0] = RMI_ERROR_INPUT;
+	}
+}
+
+static bool validate_feature_register_0(unsigned long value)
+{
+	unsigned long feat_reg0 = get_feature_register_0();
+	unsigned long s2sz = EXTRACT(RMM_FEATURE_REGISTER_0_S2SZ, value);
+
+	/* Validate S2SZ field */
+	if ((s2sz < RMM_FEATURE_MIN_IPA_SIZE) ||
+	    (s2sz > EXTRACT(RMM_FEATURE_REGISTER_0_S2SZ, feat_reg0))) {
+		return false;
+	}
+
+	/* Validate LPA2 flag */
+	if ((EXTRACT(RMM_FEATURE_REGISTER_0_LPA2, value) == RMI_LPA2) &&
+	    !is_feat_lpa2_4k_present()) {
+		return false;
+	}
+
+	return true;
+}
+
+bool validate_feature_register(unsigned long index, unsigned long value)
+{
+	switch (index) {
+	case RMM_FEATURE_REGISTER_0_INDEX:
+		return validate_feature_register_0(value);
+	default:
+		assert(false);
+		return false;
+	}
+}
diff --git a/runtime/rmi/granule.c b/runtime/rmi/granule.c
new file mode 100644
index 0000000..991905c
--- /dev/null
+++ b/runtime/rmi/granule.c
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <asc.h>
+#include <granule.h>
+#include <smc-handler.h>
+#include <smc-rmi.h>
+#include <smc.h>
+
+unsigned long smc_granule_delegate(unsigned long addr)
+{
+	struct granule *g;
+
+	g = find_lock_granule(addr, GRANULE_STATE_NS);
+	if (g == NULL) {
+		return RMI_ERROR_INPUT;
+	}
+
+	granule_set_state(g, GRANULE_STATE_DELEGATED);
+	asc_mark_secure(addr);
+	granule_memzero(g, SLOT_DELEGATED);
+
+	granule_unlock(g);
+	return RMI_SUCCESS;
+}
+
+unsigned long smc_granule_undelegate(unsigned long addr)
+{
+	struct granule *g;
+
+	g = find_lock_granule(addr, GRANULE_STATE_DELEGATED);
+	if (g == NULL) {
+		return RMI_ERROR_INPUT;
+	}
+
+	asc_mark_nonsecure(addr);
+	granule_set_state(g, GRANULE_STATE_NS);
+
+	granule_unlock(g);
+	return RMI_SUCCESS;
+}
diff --git a/runtime/rmi/realm.c b/runtime/rmi/realm.c
new file mode 100644
index 0000000..b348e90
--- /dev/null
+++ b/runtime/rmi/realm.c
@@ -0,0 +1,392 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <assert.h>
+#include <buffer.h>
+#include <feature.h>
+#include <granule.h>
+#include <measurement.h>
+#include <realm.h>
+#include <smc-handler.h>
+#include <smc-rmi.h>
+#include <smc.h>
+#include <stddef.h>
+#include <string.h>
+#include <table.h>
+#include <vmid.h>
+
+unsigned long smc_realm_activate(unsigned long rd_addr)
+{
+	struct rd *rd;
+	struct granule *g_rd;
+	unsigned long ret;
+
+	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
+	if (g_rd == NULL) {
+		return RMI_ERROR_INPUT;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+	if (get_rd_state_locked(rd) == REALM_STATE_NEW) {
+		set_rd_state(rd, REALM_STATE_ACTIVE);
+		ret = RMI_SUCCESS;
+	} else {
+		ret = RMI_ERROR_REALM;
+	}
+	buffer_unmap(rd);
+
+	granule_unlock(g_rd);
+
+	return ret;
+}
+
+static bool get_realm_params(struct rmi_realm_params *realm_params,
+				unsigned long realm_params_addr)
+{
+	bool ns_access_ok;
+	struct granule *g_realm_params;
+
+	g_realm_params = find_granule(realm_params_addr);
+	if ((g_realm_params == NULL) || (g_realm_params->state != GRANULE_STATE_NS)) {
+		return false;
+	}
+
+	ns_access_ok = ns_buffer_read(SLOT_NS, g_realm_params, 0U,
+				      sizeof(*realm_params), realm_params);
+
+	return ns_access_ok;
+}
+
+/*
+ * See the library pseudocode
+ * aarch64/translation/vmsa_faults/AArch64.S2InconsistentSL on which this is
+ * modeled.
+ */
+static bool s2_inconsistent_sl(unsigned int ipa_bits, int sl)
+{
+	int levels = RTT_PAGE_LEVEL - sl;
+	unsigned int sl_min_ipa_bits, sl_max_ipa_bits;
+
+	/*
+	 * The maximum number of concatenated tables is 16,
+	 * hence we are adding 4 to the 'sl_max_ipa_bits'.
+	 */
+	sl_min_ipa_bits = levels * S2TTE_STRIDE + GRANULE_SHIFT + 1U;
+	sl_max_ipa_bits = sl_min_ipa_bits + (S2TTE_STRIDE - 1U) + 4U;
+
+	return ((ipa_bits < sl_min_ipa_bits) || (ipa_bits > sl_max_ipa_bits));
+}
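+
+/*
+ * For example, with 4KB granules (GRANULE_SHIFT == 12, S2TTE_STRIDE == 9,
+ * RTT_PAGE_LEVEL == 3), a starting level of 1 is consistent with IPA
+ * sizes of 31 to 43 bits inclusive.
+ */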
+
+static bool validate_ipa_bits_and_sl(unsigned int ipa_bits, long sl)
+{
+	if ((ipa_bits < MIN_IPA_BITS) || (ipa_bits > MAX_IPA_BITS)) {
+		return false;
+	}
+
+	if ((sl < MIN_STARTING_LEVEL) || (sl > RTT_PAGE_LEVEL)) {
+		return false;
+	}
+
+	/*
+	 * We assume ARMv8.4-TTST is supported with RME so the only SL
+	 * configuration we need to check with 4K granules is SL == 0 following
+	 * the library pseudocode aarch64/translation/vmsa_faults/AArch64.S2InvalidSL.
+	 *
+	 * Note that this only checks invalid SL values against the properties
+	 * of the hardware platform, other misconfigurations between IPA size
+	 * and SL is checked in s2_inconsistent_sl.
+	 */
+	if ((sl == 0L) && (max_ipa_size() < 44U)) {
+		return false;
+	}
+
+	return !s2_inconsistent_sl(ipa_bits, sl);
+}
+
+static unsigned int requested_ipa_bits(struct rmi_realm_params *p)
+{
+	return EXTRACT(RMM_FEATURE_REGISTER_0_S2SZ, p->features_0);
+}
+
+static unsigned int s2_num_root_rtts(unsigned int ipa_bits, int sl)
+{
+	unsigned int levels = (unsigned int)(RTT_PAGE_LEVEL - sl);
+	unsigned int sl_ipa_bits;
+
+	/* First calculate how many bits can be resolved without concatenation */
+	sl_ipa_bits = levels * S2TTE_STRIDE /* Bits resolved by table walk without SL */
+		      + GRANULE_SHIFT	    /* Bits directly mapped to OA */
+		      + S2TTE_STRIDE;	    /* Bits resolved by single SL */
+
+	if (sl_ipa_bits >= ipa_bits) {
+		return 1U;
+	}
+
+	return (1U << (ipa_bits - sl_ipa_bits));
+}
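+
+/*
+ * For example, with 4KB granules a 40-bit IPA space starting at level 1
+ * resolves 39 bits per root table (2 * 9 + 12 + 9), so two concatenated
+ * root RTTs are needed: 1 << (40 - 39) == 2.
+ */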
+
+static bool validate_realm_params(struct rmi_realm_params *p)
+{
+	if (!validate_feature_register(RMM_FEATURE_REGISTER_0_INDEX,
+					p->features_0)) {
+		return false;
+	}
+
+	if (!validate_ipa_bits_and_sl(requested_ipa_bits(p),
+					p->rtt_level_start)) {
+		return false;
+	}
+
+	if (s2_num_root_rtts(requested_ipa_bits(p),
+				p->rtt_level_start) != p->rtt_num_start) {
+		return false;
+	}
+
+	/*
+	 * TODO: Check the VMSA configuration which is either static for the
+	 * RMM or per realm with the supplied parameters and store the
+	 * configuration on the RD, and it can potentially be copied into RECs
+	 * later.
+	 */
+
+	switch (p->hash_algo) {
+	case RMI_HASH_ALGO_SHA256:
+	case RMI_HASH_ALGO_SHA512:
+		break;
+	default:
+		return false;
+	}
+
+	/* Check VMID collision and reserve it atomically if available */
+	return vmid_reserve((unsigned int)p->vmid);
+}
+
+/*
+ * Update the realm measurement with the realm parameters.
+ */
+static void realm_params_measure(struct rd *rd,
+				 struct rmi_realm_params *realm_params)
+{
+	/* By specification realm_params is 4KB */
+	unsigned char buffer[SZ_4K] = {0};
+	struct rmi_realm_params *realm_params_measured =
+		(struct rmi_realm_params *)&buffer[0];
+
+	realm_params_measured->hash_algo = realm_params->hash_algo;
+	/* TODO: Add later */
+	/* realm_params_measured->features_0 = realm_params->features_0; */
+
+	/* Measure relevant realm params; this will be the init value of RIM */
+	measurement_hash_compute(rd->algorithm,
+			       buffer,
+			       sizeof(buffer),
+			       rd->measurement[RIM_MEASUREMENT_SLOT]);
+}
+
+static void free_sl_rtts(struct granule *g_rtt, unsigned int num_rtts)
+{
+	unsigned int i;
+
+	for (i = 0U; i < num_rtts; i++) {
+		struct granule *g = g_rtt + i;
+
+		granule_lock(g, GRANULE_STATE_RTT);
+		granule_memzero(g, SLOT_RTT);
+		granule_unlock_transition(g, GRANULE_STATE_DELEGATED);
+	}
+}
+
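+/*
+ * Lock the RD and the starting-level RTT granules for RMI_REALM_CREATE.
+ * Granules are locked in ascending address order (the RD is locked before
+ * the RTTs only when its address is lower), keeping the acquisition order
+ * consistent so that concurrent calls on the same granules cannot deadlock.
+ */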
+static bool find_lock_rd_granules(unsigned long rd_addr,
+				  struct granule **p_g_rd,
+				  unsigned long rtt_base_addr,
+				  unsigned int num_rtts,
+				  struct granule **p_g_rtt_base)
+{
+	struct granule *g_rd = NULL, *g_rtt_base = NULL;
+	int i = 0;
+
+	if (rd_addr < rtt_base_addr) {
+		g_rd = find_lock_granule(rd_addr, GRANULE_STATE_DELEGATED);
+		if (g_rd == NULL) {
+			goto out_err;
+		}
+	}
+
+	for (; i < num_rtts; i++) {
+		unsigned long rtt_addr = rtt_base_addr + i * GRANULE_SIZE;
+		struct granule *g_rtt;
+
+		g_rtt = find_lock_granule(rtt_addr, GRANULE_STATE_DELEGATED);
+		if (g_rtt == NULL) {
+			goto out_err;
+		}
+
+		if (i == 0) {
+			g_rtt_base = g_rtt;
+		}
+	}
+
+	if (g_rd == NULL) {
+		g_rd = find_lock_granule(rd_addr, GRANULE_STATE_DELEGATED);
+		if (g_rd == NULL) {
+			goto out_err;
+		}
+	}
+
+	*p_g_rd = g_rd;
+	*p_g_rtt_base = g_rtt_base;
+
+	return true;
+
+out_err:
+	for (i = i - 1; i >= 0; i--) {
+		granule_unlock(g_rtt_base + i);
+	}
+
+	if (g_rd != NULL) {
+		granule_unlock(g_rd);
+	}
+
+	return false;
+}
+
+unsigned long smc_realm_create(unsigned long rd_addr,
+			       unsigned long realm_params_addr)
+{
+	struct granule *g_rd, *g_rtt_base;
+	struct rd *rd;
+	struct rmi_realm_params p;
+	unsigned int i;
+
+	if (!get_realm_params(&p, realm_params_addr)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if (!validate_realm_params(&p)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	/*
+	 * At this point VMID is reserved for the Realm
+	 *
+	 * Check for aliasing between rd_addr and
+	 * starting level RTT address(es)
+	 */
+	if (addr_is_contained(p.rtt_base,
+			      p.rtt_base + p.rtt_num_start * GRANULE_SIZE,
+			      rd_addr)) {
+
+		/* Free reserved VMID before returning */
+		vmid_free((unsigned int)p.vmid);
+		return RMI_ERROR_INPUT;
+	}
+
+	if (!find_lock_rd_granules(rd_addr, &g_rd, p.rtt_base,
+				  p.rtt_num_start, &g_rtt_base)) {
+		/* Free reserved VMID */
+		vmid_free((unsigned int)p.vmid);
+		return RMI_ERROR_INPUT;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+	set_rd_state(rd, REALM_STATE_NEW);
+	set_rd_rec_count(rd, 0UL);
+	rd->s2_ctx.g_rtt = find_granule(p.rtt_base);
+	rd->s2_ctx.ipa_bits = requested_ipa_bits(&p);
+	rd->s2_ctx.s2_starting_level = p.rtt_level_start;
+	rd->s2_ctx.num_root_rtts = p.rtt_num_start;
+	(void)memcpy(&rd->rpv[0], &p.rpv[0], RPV_SIZE);
+
+	rd->s2_ctx.vmid = (unsigned int)p.vmid;
+
+	rd->num_rec_aux = MAX_REC_AUX_GRANULES;
+
+	switch (p.hash_algo) {
+	case RMI_HASH_ALGO_SHA256:
+		rd->algorithm = HASH_ALGO_SHA256;
+		break;
+	case RMI_HASH_ALGO_SHA512:
+		rd->algorithm = HASH_ALGO_SHA512;
+		break;
+	}
+	realm_params_measure(rd, &p);
+
+	buffer_unmap(rd);
+
+	granule_unlock_transition(g_rd, GRANULE_STATE_RD);
+
+	for (i = 0U; i < p.rtt_num_start; i++) {
+		granule_unlock_transition(g_rtt_base + i, GRANULE_STATE_RTT);
+	}
+
+	return RMI_SUCCESS;
+}
+
+static unsigned long total_root_rtt_refcount(struct granule *g_rtt,
+					     unsigned int num_rtts)
+{
+	unsigned long refcount = 0UL;
+	unsigned int i;
+
+	for (i = 0U; i < num_rtts; i++) {
+		struct granule *g = g_rtt + i;
+
+		/*
+		 * Lock starting from the RTT root.
+		 * Enforcing the RD->RTT locking order is enough to
+		 * guarantee deadlock-free locking.
+		 */
+		granule_lock(g, GRANULE_STATE_RTT);
+		refcount += g->refcount;
+		granule_unlock(g);
+	}
+
+	return refcount;
+}
+
+unsigned long smc_realm_destroy(unsigned long rd_addr)
+{
+	struct granule *g_rd;
+	struct granule *g_rtt;
+	struct rd *rd;
+	unsigned int num_rtts;
+
+	/* RD should not be destroyed if refcount != 0. */
+	g_rd = find_lock_unused_granule(rd_addr, GRANULE_STATE_RD);
+	if (ptr_is_err(g_rd)) {
+		return (unsigned long)ptr_status(g_rd);
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+	g_rtt = rd->s2_ctx.g_rtt;
+	num_rtts = rd->s2_ctx.num_root_rtts;
+
+	/*
+	 * All the mappings in the Realm have been removed and the TLB caches
+	 * are invalidated. Therefore, there are no TLB entries tagged with
+	 * this Realm's VMID (in this security state).
+	 * Just release the VMID value so it can be used in another Realm.
+	 */
+	vmid_free(rd->s2_ctx.vmid);
+	buffer_unmap(rd);
+
+	/* Check if granules are unused */
+	if (total_root_rtt_refcount(g_rtt, num_rtts) != 0UL) {
+		granule_unlock(g_rd);
+		return RMI_ERROR_IN_USE;
+	}
+
+	free_sl_rtts(g_rtt, num_rtts);
+
+	/* This implicitly destroys the measurement */
+	granule_memzero(g_rd, SLOT_RD);
+	granule_unlock_transition(g_rd, GRANULE_STATE_DELEGATED);
+
+	return RMI_SUCCESS;
+}
diff --git a/runtime/rmi/rec.c b/runtime/rmi/rec.c
new file mode 100644
index 0000000..5f82ad1
--- /dev/null
+++ b/runtime/rmi/rec.c
@@ -0,0 +1,416 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <arch_features.h>
+#include <attestation.h>
+#include <buffer.h>
+#include <cpuid.h>
+#include <gic.h>
+#include <granule.h>
+#include <mbedtls/memory_buffer_alloc.h>
+#include <measurement.h>
+#include <memory_alloc.h>
+#include <psci.h>
+#include <realm.h>
+#include <rec.h>
+#include <smc-handler.h>
+#include <smc-rmi.h>
+#include <smc.h>
+#include <spinlock.h>
+#include <stddef.h>
+#include <string.h>
+
+/*
+ * Allocate a dummy rec_params for copying relevant parameters for measurement
+ */
+static struct rmi_rec_params rec_params_per_cpu[MAX_CPUS];
+
+static void rec_params_measure(struct rd *rd, struct rmi_rec_params *rec_params)
+{
+	struct measurement_desc_rec measure_desc = {0};
+	struct rmi_rec_params *rec_params_measured =
+		&(rec_params_per_cpu[my_cpuid()]);
+
+	memset(rec_params_measured, 0, sizeof(*rec_params_measured));
+
+	/*
+	 * Copy the relevant parts of the rmi_rec_params structure to be
+	 * measured.
+	 */
+	rec_params_measured->pc = rec_params->pc;
+	rec_params_measured->flags = rec_params->flags;
+	memcpy(rec_params_measured->gprs,
+	       rec_params->gprs,
+	       sizeof(rec_params->gprs));
+
+	/* Initialize the measurement descriptor structure */
+	measure_desc.desc_type = MEASURE_DESC_TYPE_REC;
+	measure_desc.len = sizeof(struct measurement_desc_rec);
+	memcpy(measure_desc.rim,
+	       &rd->measurement[RIM_MEASUREMENT_SLOT],
+	       measurement_get_size(rd->algorithm));
+
+	/*
+	 * Hash the REC params structure and store the result in the
+	 * measurement descriptor structure.
+	 */
+	measurement_hash_compute(rd->algorithm,
+				rec_params_measured,
+				sizeof(*rec_params_measured),
+				measure_desc.content);
+
+	/*
+	 * Hash the measurement descriptor structure; the result is the
+	 * updated RIM.
+	 */
+	measurement_hash_compute(rd->algorithm,
+			       &measure_desc,
+			       sizeof(measure_desc),
+			       rd->measurement[RIM_MEASUREMENT_SLOT]);
+}
+
+static void init_rec_sysregs(struct rec *rec, unsigned long mpidr)
+{
+	/* Set non-zero values only */
+	rec->sysregs.pmcr_el0 = PMCR_EL0_RES1;
+	rec->sysregs.sctlr_el1 = SCTLR_EL1_FLAGS;
+	rec->sysregs.mdscr_el1 = MDSCR_EL1_TDCC_BIT;
+	rec->sysregs.vmpidr_el2 = mpidr | VMPIDR_EL2_RES1;
+
+	rec->sysregs.cnthctl_el2 = CNTHCTL_EL2_NO_TRAPS;
+}
+
+/*
+ * Starting level of the stage 2 translation
+ * lookup to VTCR_EL2.SL0[7:6].
+ */
+static const unsigned long sl0_val[] = {
+	VTCR_SL0_4K_L0,
+	VTCR_SL0_4K_L1,
+	VTCR_SL0_4K_L2,
+	VTCR_SL0_4K_L3
+};
+
+static unsigned long realm_vtcr(struct rd *rd)
+{
+	unsigned long t0sz, sl0;
+	unsigned long vtcr = is_feat_vmid16_present() ?
+				(VTCR_FLAGS | VTCR_VS) : VTCR_FLAGS;
+	int s2_starting_level = realm_rtt_starting_level(rd);
+
+	/* TODO: Support LPA2 with -1 */
+	assert((s2_starting_level >= 0) && (s2_starting_level <= 3));
+	sl0 = sl0_val[s2_starting_level];
+
+	t0sz = 64UL - realm_ipa_bits(rd);
+	t0sz &= MASK(VTCR_T0SZ);
+
+	vtcr |= t0sz;
+	vtcr |= sl0;
+
+	return vtcr;
+}
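+
+/*
+ * For example, a Realm with a 40-bit IPA space and a starting RTT level
+ * of 1 is configured with T0SZ == 24 (64 - 40) and SL0 == VTCR_SL0_4K_L1.
+ */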
+
+static void init_common_sysregs(struct rec *rec, struct rd *rd)
+{
+	/* Set non-zero values only */
+	rec->common_sysregs.hcr_el2 = HCR_FLAGS;
+	rec->common_sysregs.vtcr_el2 =  realm_vtcr(rd);
+	rec->common_sysregs.vttbr_el2 = granule_addr(rd->s2_ctx.g_rtt);
+	rec->common_sysregs.vttbr_el2 &= MASK(TTBRx_EL2_BADDR);
+	rec->common_sysregs.vttbr_el2 |= INPLACE(VTTBR_EL2_VMID, rd->s2_ctx.vmid);
+}
+
+static void init_rec_regs(struct rec *rec,
+			  struct rmi_rec_params *rec_params,
+			  struct rd *rd)
+{
+	unsigned int i;
+
+	/*
+	 * We only need to set non-zero values here because we're initializing
+	 * data structures in the rec granule which was just converted from
+	 * the DELEGATED state to REC state, and we can rely on the RMM
+	 * invariant that DELEGATED granules are always zero-filled.
+	 */
+
+	for (i = 0U; i < REC_CREATE_NR_GPRS; i++) {
+		rec->regs[i] = rec_params->gprs[i];
+	}
+
+	rec->pc = rec_params->pc;
+	rec->pstate = SPSR_EL2_MODE_EL1h |
+		      SPSR_EL2_nRW_AARCH64 |
+		      SPSR_EL2_F_BIT |
+		      SPSR_EL2_I_BIT |
+		      SPSR_EL2_A_BIT |
+		      SPSR_EL2_D_BIT;
+
+	init_rec_sysregs(rec, rec_params->mpidr);
+	init_common_sysregs(rec, rd);
+}
+
+/*
+ * This function will only be invoked when the REC create fails
+ * or when REC is being destroyed. Hence the REC will not be in
+ * use when this function is called and therefore no lock is
+ * acquired before its invocation.
+ */
+static void free_rec_aux_granules(struct granule *rec_aux[],
+				  unsigned int cnt, bool scrub)
+{
+	for (unsigned int i = 0U; i < cnt; i++) {
+		struct granule *g_rec_aux = rec_aux[i];
+
+		granule_lock(g_rec_aux, GRANULE_STATE_REC_AUX);
+		if (scrub) {
+			granule_memzero(g_rec_aux, SLOT_REC_AUX0 + i);
+		}
+		granule_unlock_transition(g_rec_aux, GRANULE_STATE_DELEGATED);
+	}
+}
+
+unsigned long smc_rec_create(unsigned long rec_addr,
+			     unsigned long rd_addr,
+			     unsigned long rec_params_addr)
+{
+	struct granule *g_rd;
+	struct granule *g_rec;
+	struct granule *rec_aux_granules[MAX_REC_AUX_GRANULES];
+	struct granule *g_rec_params;
+	struct rec *rec;
+	struct rd *rd;
+	struct rmi_rec_params rec_params;
+	unsigned long rec_idx;
+	enum granule_state new_rec_state = GRANULE_STATE_DELEGATED;
+	unsigned long ret;
+	bool ns_access_ok;
+	unsigned int num_rec_aux;
+
+	g_rec_params = find_granule(rec_params_addr);
+	if ((g_rec_params == NULL) || (g_rec_params->state != GRANULE_STATE_NS)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	ns_access_ok = ns_buffer_read(SLOT_NS, g_rec_params, 0U,
+				      sizeof(rec_params), &rec_params);
+
+	if (!ns_access_ok) {
+		return RMI_ERROR_INPUT;
+	}
+
+	num_rec_aux = (unsigned int)rec_params.num_aux;
+	if (num_rec_aux > MAX_REC_AUX_GRANULES) {
+		return RMI_ERROR_INPUT;
+	}
+
+	/* Loop through the REC auxiliary granules and transition them to REC_AUX */
+	for (unsigned int i = 0U; i < num_rec_aux; i++) {
+		struct granule *g_rec_aux = find_lock_granule(
+						rec_params.aux[i],
+						GRANULE_STATE_DELEGATED);
+		if (g_rec_aux == NULL) {
+			free_rec_aux_granules(rec_aux_granules, i, false);
+			return RMI_ERROR_INPUT;
+		}
+		granule_unlock_transition(g_rec_aux, GRANULE_STATE_REC_AUX);
+		rec_aux_granules[i] = g_rec_aux;
+	}
+
+	if (!find_lock_two_granules(rec_addr,
+				GRANULE_STATE_DELEGATED,
+				&g_rec,
+				rd_addr,
+				GRANULE_STATE_RD,
+				&g_rd)) {
+		ret = RMI_ERROR_INPUT;
+		goto out_free_aux;
+	}
+
+	rec = granule_map(g_rec, SLOT_REC);
+	rd = granule_map(g_rd, SLOT_RD);
+
+	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
+		ret = RMI_ERROR_REALM;
+		goto out_unmap;
+	}
+
+	rec_idx = get_rd_rec_count_locked(rd);
+	if (!mpidr_is_valid(rec_params.mpidr) ||
+	   (rec_idx != mpidr_to_rec_idx(rec_params.mpidr))) {
+		ret = RMI_ERROR_INPUT;
+		goto out_unmap;
+	}
+
+	/* Verify the auxiliary granule count with rd lock held */
+	if (num_rec_aux != rd->num_rec_aux) {
+		ret = RMI_ERROR_INPUT;
+		goto out_unmap;
+	}
+
+	rec->g_rec = g_rec;
+	rec->rec_idx = rec_idx;
+
+	init_rec_regs(rec, &rec_params, rd);
+	gic_cpu_state_init(&rec->sysregs.gicstate);
+
+	/* Copy addresses of auxiliary granules */
+	(void)memcpy(rec->g_aux, rec_aux_granules,
+			num_rec_aux * sizeof(rec->g_aux[0]));
+
+	rec->num_rec_aux = num_rec_aux;
+
+	rec->realm_info.ipa_bits = realm_ipa_bits(rd);
+	rec->realm_info.s2_starting_level = realm_rtt_starting_level(rd);
+	rec->realm_info.g_rtt = rd->s2_ctx.g_rtt;
+	rec->realm_info.g_rd = g_rd;
+
+	rec_params_measure(rd, &rec_params);
+
+	/*
+	 * RD has a lock-free access from RMI_REC_DESTROY, hence increment
+	 * refcount atomically. Also, since the granule is only used for
+	 * refcount update, only an atomic operation will suffice and
+	 * release/acquire semantics are not required.
+	 */
+	atomic_granule_get(g_rd);
+	new_rec_state = GRANULE_STATE_REC;
+	rec->runnable = rec_params.flags & REC_PARAMS_FLAG_RUNNABLE;
+
+	rec->alloc_info.ctx_initialised = false;
+	/* Initialize attestation state */
+	rec->token_sign_ctx.state = ATTEST_SIGN_NOT_STARTED;
+
+	set_rd_rec_count(rd, rec_idx + 1U);
+
+	ret = RMI_SUCCESS;
+
+out_unmap:
+	buffer_unmap(rd);
+	buffer_unmap(rec);
+
+	granule_unlock(g_rd);
+	granule_unlock_transition(g_rec, new_rec_state);
+
+out_free_aux:
+	if (ret != RMI_SUCCESS) {
+		free_rec_aux_granules(rec_aux_granules, num_rec_aux, false);
+	}
+	return ret;
+}
+
+unsigned long smc_rec_destroy(unsigned long rec_addr)
+{
+	struct granule *g_rec;
+	struct granule *g_rd;
+	struct rec *rec;
+
+	/* REC should not be destroyed if refcount != 0 */
+	g_rec = find_lock_unused_granule(rec_addr, GRANULE_STATE_REC);
+	if (ptr_is_err(g_rec)) {
+		return (unsigned long)ptr_status(g_rec);
+	}
+
+	rec = granule_map(g_rec, SLOT_REC);
+
+	g_rd = rec->realm_info.g_rd;
+
+	/* Free and scrub the auxiliary granules */
+	free_rec_aux_granules(rec->g_aux, rec->num_rec_aux, true);
+
+	granule_memzero_mapped(rec);
+	buffer_unmap(rec);
+
+	granule_unlock_transition(g_rec, GRANULE_STATE_DELEGATED);
+
+	/*
+	 * Decrement refcount. The refcount should be balanced before
+	 * RMI_REC_DESTROY returns, and until this occurs a transient
+	 * over-estimate of the refcount (in-between the unlock and decreasing
+	 * the refcount) is legitimate. Also, since the granule is only used for
+	 * refcount update, only an atomic operation will suffice and
+	 * release/acquire semantics are not required.
+	 */
+	atomic_granule_put(g_rd);
+
+	return RMI_SUCCESS;
+}
+
+void smc_rec_aux_count(unsigned long rd_addr, struct smc_result *ret_struct)
+{
+	unsigned int num_rec_aux;
+	struct granule *g_rd;
+	struct rd *rd;
+
+	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
+	if (g_rd == NULL) {
+		ret_struct->x[0] = RMI_ERROR_INPUT;
+		return;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+	num_rec_aux = rd->num_rec_aux;
+	buffer_unmap(rd);
+	granule_unlock(g_rd);
+
+	ret_struct->x[0] = RMI_SUCCESS;
+	ret_struct->x[1] = (unsigned long)num_rec_aux;
+}
+
+unsigned long smc_psci_complete(unsigned long calling_rec_addr,
+				unsigned long target_rec_addr)
+{
+	struct granule *g_calling_rec, *g_target_rec;
+	struct rec  *calling_rec, *target_rec;
+	unsigned long ret;
+
+	assert(calling_rec_addr != 0UL);
+	assert(target_rec_addr != 0UL);
+
+	if (!GRANULE_ALIGNED(calling_rec_addr)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if (!GRANULE_ALIGNED(target_rec_addr)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if (!find_lock_two_granules(calling_rec_addr,
+					GRANULE_STATE_REC,
+					&g_calling_rec,
+					target_rec_addr,
+					GRANULE_STATE_REC,
+					&g_target_rec)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	/*
+	 * The access to a REC from RMI_REC_ENTER is only protected by the
+	 * reference counter. Here, we may access the volatile (non-constant)
+	 * members of REC structure (such as rec->running) only if the counter
+	 * is zero.
+	 */
+	if (granule_refcount_read_acquire(g_calling_rec) != 0UL) {
+		/*
+		 * The `calling` REC is running on another PE and therefore it
+		 * may not have a pending PSCI request.
+		 */
+		ret = RMI_ERROR_INPUT;
+		goto out_unlock;
+	}
+
+	calling_rec = granule_map(g_calling_rec, SLOT_REC);
+	target_rec = granule_map(g_target_rec, SLOT_REC2);
+
+	ret = psci_complete_request(calling_rec, target_rec);
+
+	buffer_unmap(target_rec);
+	buffer_unmap(calling_rec);
+out_unlock:
+	granule_unlock(g_calling_rec);
+	granule_unlock(g_target_rec);
+
+	return ret;
+}
diff --git a/runtime/rmi/rtt.c b/runtime/rmi/rtt.c
new file mode 100644
index 0000000..e474e5d
--- /dev/null
+++ b/runtime/rmi/rtt.c
@@ -0,0 +1,1299 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <buffer.h>
+#include <granule.h>
+#include <measurement.h>
+#include <realm.h>
+#include <ripas.h>
+#include <smc-handler.h>
+#include <smc-rmi.h>
+#include <smc.h>
+#include <stddef.h>
+#include <string.h>
+#include <table.h>
+
+/*
+ * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
+ */
+static bool validate_map_addr(unsigned long map_addr,
+			      unsigned long level,
+			      struct rd *rd)
+{
+	if (map_addr >= realm_ipa_size(rd)) {
+		return false;
+	}
+	if (!addr_is_level_aligned(map_addr, level)) {
+		return false;
+	}
+	return true;
+}
+
+/*
+ * Structure commands can operate on all RTTs except for the root RTT so
+ * the minimal valid level is the stage 2 starting level + 1.
+ */
+static bool validate_rtt_structure_cmds(unsigned long map_addr,
+					long level,
+					struct rd *rd)
+{
+	int min_level = realm_rtt_starting_level(rd) + 1;
+
+	if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
+		return false;
+	}
+	return validate_map_addr(map_addr, level, rd);
+}
+
+/*
+ * Map/Unmap commands can operate up to a level 2 block entry so min_level is
+ * the smallest block size.
+ */
+static bool validate_rtt_map_cmds(unsigned long map_addr,
+				  long level,
+				  struct rd *rd)
+{
+	if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
+		return false;
+	}
+	return validate_map_addr(map_addr, level, rd);
+}
+
+/*
+ * Entry commands can operate on any entry so the minimal valid level is the
+ * stage 2 starting level.
+ */
+static bool validate_rtt_entry_cmds(unsigned long map_addr,
+				    long level,
+				    struct rd *rd)
+{
+	if ((level < realm_rtt_starting_level(rd)) ||
+	    (level > RTT_PAGE_LEVEL)) {
+		return false;
+	}
+	return validate_map_addr(map_addr, level, rd);
+}
+
+unsigned long smc_rtt_create(unsigned long rtt_addr,
+			     unsigned long rd_addr,
+			     unsigned long map_addr,
+			     unsigned long ulevel)
+{
+	struct granule *g_rd;
+	struct granule *g_tbl;
+	struct rd *rd;
+	struct granule *g_table_root;
+	struct rtt_walk wi;
+	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
+	long level = (long)ulevel;
+	unsigned long ipa_bits;
+	unsigned long ret;
+	struct realm_s2_context s2_ctx;
+	int sl;
+
+	if (!find_lock_two_granules(rtt_addr,
+				    GRANULE_STATE_DELEGATED,
+				    &g_tbl,
+				    rd_addr,
+				    GRANULE_STATE_RD,
+				    &g_rd)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+
+	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
+		buffer_unmap(rd);
+		granule_unlock(g_rd);
+		granule_unlock(g_tbl);
+		return RMI_ERROR_INPUT;
+	}
+
+	g_table_root = rd->s2_ctx.g_rtt;
+	sl = realm_rtt_starting_level(rd);
+	ipa_bits = realm_ipa_bits(rd);
+	s2_ctx = rd->s2_ctx;
+	buffer_unmap(rd);
+
+	/*
+	 * Lock the RTT root. Enforcing the RD->RTT locking order is enough
+	 * to guarantee deadlock-free locking.
+	 */
+	granule_lock(g_table_root, GRANULE_STATE_RTT);
+
+	/* Unlock RD after locking RTT Root */
+	granule_unlock(g_rd);
+
+	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
+				map_addr, level - 1L, &wi);
+	if (wi.last_level != level - 1L) {
+		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
+		goto out_unlock_llt;
+	}
+
+	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
+	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
+	s2tt = granule_map(g_tbl, SLOT_DELEGATED);
+
+	if (s2tte_is_unassigned(parent_s2tte)) {
+		/*
+		 * Note that if map_addr is an Unprotected IPA, the RIPAS field
+		 * is guaranteed to be zero, in both parent and child s2ttes.
+		 */
+		enum ripas ripas = s2tte_get_ripas(parent_s2tte);
+
+		s2tt_init_unassigned(s2tt, ripas);
+
+		/*
+		 * Increase the refcount of the parent; the granule is
+		 * locked during the table walk using hand-over-hand
+		 * locking. Atomicity and acquire/release semantics are not
+		 * required because the table is always accessed locked.
+		 */
+		__granule_get(wi.g_llt);
+
+	} else if (s2tte_is_destroyed(parent_s2tte)) {
+		s2tt_init_destroyed(s2tt);
+		__granule_get(wi.g_llt);
+
+	} else if (s2tte_is_assigned(parent_s2tte, level - 1L)) {
+		unsigned long block_pa;
+
+		/*
+		 * We should observe parent assigned s2tte only when
+		 * we create tables above this level.
+		 */
+		assert(level > RTT_MIN_BLOCK_LEVEL);
+
+		block_pa = s2tte_pa(parent_s2tte, level - 1L);
+
+		s2tt_init_assigned_empty(s2tt, block_pa, level);
+
+		/*
+		 * Increase the refcount to mark the granule as in-use. The
+		 * refcount is incremented by S2TTES_PER_S2TT (see RTT unfolding).
+		 */
+		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
+
+	} else if (s2tte_is_valid(parent_s2tte, level - 1L)) {
+		unsigned long block_pa;
+
+		/*
+		 * We should observe parent valid s2tte only when
+		 * we create tables above this level.
+		 */
+		assert(level > RTT_MIN_BLOCK_LEVEL);
+
+		/*
+		 * Break before make. This may cause spurious S2 aborts.
+		 */
+		s2tte_write(&parent_s2tt[wi.index], 0UL);
+		invalidate_block(&s2_ctx, map_addr);
+
+		block_pa = s2tte_pa(parent_s2tte, level - 1L);
+
+		s2tt_init_valid(s2tt, block_pa, level);
+
+		/*
+		 * Increase the refcount to mark the granule as in-use. The
+		 * refcount is incremented by S2TTES_PER_S2TT (see RTT unfolding).
+		 */
+		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
+
+	} else if (s2tte_is_valid_ns(parent_s2tte, level - 1L)) {
+		unsigned long block_pa;
+
+		/*
+		 * We should observe parent valid_ns s2tte only when
+		 * we create tables above this level.
+		 */
+		assert(level > RTT_MIN_BLOCK_LEVEL);
+
+		/*
+		 * Break before make. This may cause spurious S2 aborts.
+		 */
+		s2tte_write(&parent_s2tt[wi.index], 0UL);
+		invalidate_block(&s2_ctx, map_addr);
+
+		block_pa = s2tte_pa(parent_s2tte, level - 1L);
+
+		s2tt_init_valid_ns(s2tt, block_pa, level);
+
+		/*
+		 * Increase the refcount to mark the granule as in-use. The
+		 * refcount is incremented by S2TTES_PER_S2TT (see RTT unfolding).
+		 */
+		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
+
+	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
+		ret = pack_return_code(RMI_ERROR_RTT,
+					(unsigned int)(level - 1L));
+		goto out_unmap_table;
+
+	} else {
+		assert(false);
+	}
+
+	ret = RMI_SUCCESS;
+
+	granule_set_state(g_tbl, GRANULE_STATE_RTT);
+
+	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
+	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);
+
+out_unmap_table:
+	buffer_unmap(s2tt);
+	buffer_unmap(parent_s2tt);
+out_unlock_llt:
+	granule_unlock(wi.g_llt);
+	granule_unlock(g_tbl);
+	return ret;
+}
+
+unsigned long smc_rtt_fold(unsigned long rtt_addr,
+			   unsigned long rd_addr,
+			   unsigned long map_addr,
+			   unsigned long ulevel)
+{
+	struct granule *g_rd;
+	struct granule *g_tbl;
+	struct rd *rd;
+	struct granule *g_table_root;
+	struct rtt_walk wi;
+	unsigned long *table, *parent_s2tt, parent_s2tte;
+	long level = (long)ulevel;
+	unsigned long ipa_bits;
+	unsigned long ret;
+	struct realm_s2_context s2_ctx;
+	int sl;
+	enum ripas ripas;
+
+	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
+	if (g_rd == NULL) {
+		return RMI_ERROR_INPUT;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+
+	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
+		buffer_unmap(rd);
+		granule_unlock(g_rd);
+		return RMI_ERROR_INPUT;
+	}
+
+	g_table_root = rd->s2_ctx.g_rtt;
+	sl = realm_rtt_starting_level(rd);
+	ipa_bits = realm_ipa_bits(rd);
+	s2_ctx = rd->s2_ctx;
+	buffer_unmap(rd);
+	granule_lock(g_table_root, GRANULE_STATE_RTT);
+	granule_unlock(g_rd);
+
+	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
+				map_addr, level - 1L, &wi);
+	if (wi.last_level != level - 1L) {
+		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
+		goto out_unlock_parent_table;
+	}
+
+	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
+	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
+	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
+		ret = pack_return_code(RMI_ERROR_RTT,
+					(unsigned int)(level - 1L));
+		goto out_unmap_parent_table;
+	}
+
+	/*
+	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
+	 * Note that this also verifies that the rtt_addr is properly aligned.
+	 */
+	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
+		ret = pack_return_code(RMI_ERROR_RTT,
+					(unsigned int)(level - 1L));
+		goto out_unmap_parent_table;
+	}
+
+	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);
+
+	/*
+	 * A table descriptor S2TTE always points to a TABLE granule.
+	 */
+	assert(g_tbl != NULL);
+
+	table = granule_map(g_tbl, SLOT_RTT2);
+
+	/*
+	 * The command can succeed only if all 512 S2TTEs are of the same type.
+	 * We first check the table's ref. counter to speed up the case when
+	 * the host makes a guess whether a memory region can be folded.
+	 */
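+	/*
+	 * The refcount of an RTT granule tracks its number of live entries:
+	 * refcount == 0 implies the table holds only unassigned/destroyed
+	 * entries, while refcount == S2TTES_PER_S2TT implies it is fully
+	 * populated with live mappings.
+	 */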
+	if (g_tbl->refcount == 0UL) {
+		if (table_is_destroyed_block(table)) {
+			parent_s2tte = s2tte_create_destroyed();
+			__granule_put(wi.g_llt);
+
+		} else if (table_is_unassigned_block(table, &ripas)) {
+			/*
+			 * Note that if map_addr is an Unprotected IPA, the
+			 * RIPAS field is guaranteed to be zero, in both parent
+			 * and child s2ttes.
+			 */
+			parent_s2tte = s2tte_create_unassigned(ripas);
+			__granule_put(wi.g_llt);
+		} else {
+			/*
+			 * The table holds a mixture of destroyed and
+			 * unassigned entries.
+			 */
+			ret = RMI_ERROR_IN_USE;
+			goto out_unmap_table;
+		}
+
+	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {
+
+		unsigned long s2tte, block_pa;
+
+		/*
+		 * The RMM specification does not allow creating block
+		 * entries below RTT_MIN_BLOCK_LEVEL even though the Arm
+		 * Architecture permits it. Hence, ensure that the table
+		 * being folded is at a level higher than RTT_MIN_BLOCK_LEVEL.
+		 *
+		 * A fully populated table cannot be folded if that
+		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
+		 */
+		if (level <= RTT_MIN_BLOCK_LEVEL) {
+			ret = RMI_ERROR_IN_USE;
+			goto out_unmap_table;
+		}
+
+		s2tte = s2tte_read(&table[0]);
+		block_pa = s2tte_pa(s2tte, level - 1L);
+
+		/*
+		 * The table must also refer to a contiguous block through
+		 * the same type of s2tte: Assigned, Valid or Valid_NS.
+		 */
+		if (table_maps_assigned_block(table, level)) {
+			parent_s2tte = s2tte_create_assigned_empty(block_pa, level - 1L);
+		} else if (table_maps_valid_block(table, level)) {
+			parent_s2tte = s2tte_create_valid(block_pa, level - 1L);
+		} else if (table_maps_valid_ns_block(table, level)) {
+			parent_s2tte = s2tte_create_valid_ns(block_pa, level - 1L);
+		/* This 'else' case should not happen */
+		} else {
+			assert(false);
+		}
+
+		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
+	} else {
+		/*
+		 * The table holds a mixture of different types of s2ttes.
+		 */
+		ret = RMI_ERROR_IN_USE;
+		goto out_unmap_table;
+	}
+
+	ret = RMI_SUCCESS;
+
+	/*
+	 * Break before make.
+	 */
+	s2tte_write(&parent_s2tt[wi.index], 0UL);
+
+	if (s2tte_is_valid(parent_s2tte, level - 1L) ||
+	    s2tte_is_valid_ns(parent_s2tte, level - 1L)) {
+		invalidate_pages_in_block(&s2_ctx, map_addr);
+	} else {
+		invalidate_block(&s2_ctx, map_addr);
+	}
+
+	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);
+
+	granule_memzero_mapped(table);
+	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);
+
+out_unmap_table:
+	buffer_unmap(table);
+	granule_unlock(g_tbl);
+out_unmap_parent_table:
+	buffer_unmap(parent_s2tt);
+out_unlock_parent_table:
+	granule_unlock(wi.g_llt);
+	return ret;
+}
+
+unsigned long smc_rtt_destroy(unsigned long rtt_addr,
+			      unsigned long rd_addr,
+			      unsigned long map_addr,
+			      unsigned long ulevel)
+{
+	struct granule *g_rd;
+	struct granule *g_tbl;
+	struct rd *rd;
+	struct granule *g_table_root;
+	struct rtt_walk wi;
+	unsigned long *table, *parent_s2tt, parent_s2tte;
+	long level = (long)ulevel;
+	unsigned long ipa_bits;
+	unsigned long ret;
+	struct realm_s2_context s2_ctx;
+	int sl;
+	bool in_par;
+
+	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
+	if (g_rd == NULL) {
+		return RMI_ERROR_INPUT;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+
+	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
+		buffer_unmap(rd);
+		granule_unlock(g_rd);
+		return RMI_ERROR_INPUT;
+	}
+
+	g_table_root = rd->s2_ctx.g_rtt;
+	sl = realm_rtt_starting_level(rd);
+	ipa_bits = realm_ipa_bits(rd);
+	s2_ctx = rd->s2_ctx;
+	in_par = addr_in_par(rd, map_addr);
+	buffer_unmap(rd);
+	granule_lock(g_table_root, GRANULE_STATE_RTT);
+	granule_unlock(g_rd);
+
+	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
+				map_addr, level - 1L, &wi);
+	if (wi.last_level != level - 1L) {
+		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
+		goto out_unlock_parent_table;
+	}
+
+	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
+	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
+	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
+		ret = pack_return_code(RMI_ERROR_RTT,
+					(unsigned int)(level - 1L));
+		goto out_unmap_parent_table;
+	}
+
+	/*
+	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
+	 * Note that this also verifies that the rtt_addr is properly aligned.
+	 */
+	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
+		ret = RMI_ERROR_INPUT;
+		goto out_unmap_parent_table;
+	}
+
+	/*
+	 * Lock the RTT granule. The 'rtt_addr' has been verified, thus it
+	 * can be treated as an internal granule.
+	 */
+	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);
+
+	/*
+	 * A table descriptor S2TTE always points to a TABLE granule.
+	 */
+	assert(g_tbl != NULL);
+
+	/*
+	 * Read the refcount value. RTT granule is always accessed locked, thus
+	 * the refcount can be accessed without atomic operations.
+	 */
+	if (g_tbl->refcount != 0UL) {
+		ret = RMI_ERROR_IN_USE;
+		goto out_unlock_table;
+	}
+
+	ret = RMI_SUCCESS;
+
+	table = granule_map(g_tbl, SLOT_RTT2);
+
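+	/*
+	 * A Protected IPA transitions to DESTROYED, recording that the
+	 * contents of the range were lost, while an Unprotected IPA has
+	 * no such state to preserve and simply becomes invalid.
+	 */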
+	if (in_par) {
+		parent_s2tte = s2tte_create_destroyed();
+	} else {
+		parent_s2tte = s2tte_create_invalid_ns();
+	}
+
+	__granule_put(wi.g_llt);
+
+	/*
+	 * Break before make. Note that this may cause spurious S2 aborts.
+	 */
+	s2tte_write(&parent_s2tt[wi.index], 0UL);
+	invalidate_block(&s2_ctx, map_addr);
+	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);
+
+	granule_memzero_mapped(table);
+	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);
+
+	buffer_unmap(table);
+out_unlock_table:
+	granule_unlock(g_tbl);
+out_unmap_parent_table:
+	buffer_unmap(parent_s2tt);
+out_unlock_parent_table:
+	granule_unlock(wi.g_llt);
+	return ret;
+}
+
+enum map_unmap_ns_op {
+	MAP_NS,
+	UNMAP_NS
+};
+
+/*
+ * We don't hold a reference on the NS granule when it is
+ * mapped into a realm. Instead we rely on the guarantees
+ * provided by the architecture to ensure that an NS access
+ * to a protected granule is prohibited even within the realm.
+ */
+static unsigned long map_unmap_ns(unsigned long rd_addr,
+				  unsigned long map_addr,
+				  long level,
+				  unsigned long host_s2tte,
+				  enum map_unmap_ns_op op)
+{
+	struct granule *g_rd;
+	struct rd *rd;
+	struct granule *g_table_root;
+	unsigned long *s2tt, s2tte;
+	struct rtt_walk wi;
+	unsigned long ipa_bits;
+	unsigned long ret;
+	struct realm_s2_context s2_ctx;
+	int sl;
+
+	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
+	if (g_rd == NULL) {
+		return RMI_ERROR_INPUT;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+
+	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
+		buffer_unmap(rd);
+		granule_unlock(g_rd);
+		return RMI_ERROR_INPUT;
+	}
+
+	g_table_root = rd->s2_ctx.g_rtt;
+	sl = realm_rtt_starting_level(rd);
+	ipa_bits = realm_ipa_bits(rd);
+
+	/*
+	 * We don't have to check PAR boundaries for the unmap_ns
+	 * operation because we already test that the s2tte is Valid_NS,
+	 * and only outside-PAR IPAs can be translated by such an s2tte.
+	 *
+	 * For "map_ns", however, the s2tte is verified to be Unassigned,
+	 * and both inside and outside PAR IPAs can be translated by such
+	 * s2ttes.
+	 */
+	if ((op == MAP_NS) && addr_in_par(rd, map_addr)) {
+		buffer_unmap(rd);
+		granule_unlock(g_rd);
+		return RMI_ERROR_INPUT;
+	}
+
+	s2_ctx = rd->s2_ctx;
+	buffer_unmap(rd);
+
+	granule_lock(g_table_root, GRANULE_STATE_RTT);
+	granule_unlock(g_rd);
+
+	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
+				map_addr, level, &wi);
+	if (wi.last_level != level) {
+		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
+		goto out_unlock_llt;
+	}
+
+	s2tt = granule_map(wi.g_llt, SLOT_RTT);
+	s2tte = s2tte_read(&s2tt[wi.index]);
+
+	if (op == MAP_NS) {
+		if (!s2tte_is_unassigned(s2tte)) {
+			ret = pack_return_code(RMI_ERROR_RTT,
+						(unsigned int)level);
+			goto out_unmap_table;
+		}
+
+		s2tte = s2tte_create_valid_ns(host_s2tte, level);
+		s2tte_write(&s2tt[wi.index], s2tte);
+		__granule_get(wi.g_llt);
+
+	} else if (op == UNMAP_NS) {
+		/*
+		 * The following check also verifies that map_addr is outside
+		 * the PAR, as a Valid_NS s2tte may only cover outside-PAR IPAs.
+		 */
+		if (!s2tte_is_valid_ns(s2tte, level)) {
+			ret = pack_return_code(RMI_ERROR_RTT,
+						(unsigned int)level);
+			goto out_unmap_table;
+		}
+
+		s2tte = s2tte_create_invalid_ns();
+		s2tte_write(&s2tt[wi.index], s2tte);
+		__granule_put(wi.g_llt);
+		if (level == RTT_PAGE_LEVEL) {
+			invalidate_page(&s2_ctx, map_addr);
+		} else {
+			invalidate_block(&s2_ctx, map_addr);
+		}
+	}
+
+	ret = RMI_SUCCESS;
+
+out_unmap_table:
+	buffer_unmap(s2tt);
+out_unlock_llt:
+	granule_unlock(wi.g_llt);
+	return ret;
+}
+
+unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
+				      unsigned long map_addr,
+				      unsigned long ulevel,
+				      unsigned long s2tte)
+{
+	long level = (long)ulevel;
+
+	if (!host_ns_s2tte_is_valid(s2tte, level)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	return map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS);
+}
+
+unsigned long smc_rtt_unmap_unprotected(unsigned long rd_addr,
+					unsigned long map_addr,
+					unsigned long ulevel)
+{
+	return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS);
+}
+
+void smc_rtt_read_entry(unsigned long rd_addr,
+			unsigned long map_addr,
+			unsigned long ulevel,
+			struct smc_result *ret)
+{
+	struct granule *g_rd, *g_rtt_root;
+	struct rd *rd;
+	struct rtt_walk wi;
+	unsigned long *s2tt, s2tte;
+	unsigned long ipa_bits;
+	long level = (long)ulevel;
+	int sl;
+
+	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
+	if (g_rd == NULL) {
+		ret->x[0] = RMI_ERROR_INPUT;
+		return;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+
+	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
+		buffer_unmap(rd);
+		granule_unlock(g_rd);
+		ret->x[0] = RMI_ERROR_INPUT;
+		return;
+	}
+
+	g_rtt_root = rd->s2_ctx.g_rtt;
+	sl = realm_rtt_starting_level(rd);
+	ipa_bits = realm_ipa_bits(rd);
+	buffer_unmap(rd);
+
+	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
+	granule_unlock(g_rd);
+
+	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
+				map_addr, level, &wi);
+	s2tt = granule_map(wi.g_llt, SLOT_RTT);
+	s2tte = s2tte_read(&s2tt[wi.index]);
+	ret->x[1] = wi.last_level;
+	ret->x[3] = 0UL;
+	ret->x[4] = 0UL;
+
+	if (s2tte_is_unassigned(s2tte)) {
+		enum ripas ripas = s2tte_get_ripas(s2tte);
+
+		ret->x[2] = RMI_RTT_STATE_UNASSIGNED;
+		ret->x[4] = (unsigned long)ripas;
+	} else if (s2tte_is_destroyed(s2tte)) {
+		ret->x[2] = RMI_RTT_STATE_DESTROYED;
+	} else if (s2tte_is_assigned(s2tte, wi.last_level)) {
+		ret->x[2] = RMI_RTT_STATE_ASSIGNED;
+		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
+		ret->x[4] = RMI_EMPTY;
+	} else if (s2tte_is_valid(s2tte, wi.last_level)) {
+		ret->x[2] = RMI_RTT_STATE_ASSIGNED;
+		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
+		ret->x[4] = RMI_RAM;
+	} else if (s2tte_is_valid_ns(s2tte, wi.last_level)) {
+		ret->x[2] = RMI_RTT_STATE_VALID_NS;
+		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
+	} else if (s2tte_is_table(s2tte, wi.last_level)) {
+		ret->x[2] = RMI_RTT_STATE_TABLE;
+		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
+	} else {
+		assert(false);
+	}
+
+	buffer_unmap(s2tt);
+	granule_unlock(wi.g_llt);
+
+	ret->x[0] = RMI_SUCCESS;
+}
+
+static void data_granule_measure(struct rd *rd, void *data,
+				 unsigned long ipa,
+				 unsigned long flags)
+{
+	struct measurement_desc_data measure_desc = {0};
+
+	/* Initialize the measurement descriptor structure */
+	measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
+	measure_desc.len = sizeof(struct measurement_desc_data);
+	measure_desc.ipa = ipa;
+	measure_desc.flags = flags;
+	memcpy(measure_desc.rim,
+	       &rd->measurement[RIM_MEASUREMENT_SLOT],
+	       measurement_get_size(rd->algorithm));
+
+	if (flags == RMI_MEASURE_CONTENT) {
+		/*
+		 * Hash the data granule and store the result in the
+		 * measurement descriptor structure.
+		 */
+		measurement_hash_compute(rd->algorithm,
+					data,
+					GRANULE_SIZE,
+					measure_desc.content);
+	}
+
+	/*
+	 * Hash the measurement descriptor structure; the result is the
+	 * updated RIM.
+	 */
+	measurement_hash_compute(rd->algorithm,
+			       &measure_desc,
+			       sizeof(measure_desc),
+			       rd->measurement[RIM_MEASUREMENT_SLOT]);
+}
+
+static unsigned long validate_data_create_unknown(unsigned long map_addr,
+						  struct rd *rd)
+{
+	if (!addr_in_par(rd, map_addr)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	return RMI_SUCCESS;
+}
+
+static unsigned long validate_data_create(unsigned long map_addr,
+					  struct rd *rd)
+{
+	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
+		return RMI_ERROR_REALM;
+	}
+
+	return validate_data_create_unknown(map_addr, rd);
+}
+
+/*
+ * Implements both Data.Create and Data.CreateUnknown.
+ *
+ * If @g_src == NULL, this implements Data.CreateUnknown;
+ * otherwise it implements Data.Create.
+ */
+static unsigned long data_create(unsigned long data_addr,
+				 unsigned long rd_addr,
+				 unsigned long map_addr,
+				 struct granule *g_src,
+				 unsigned long flags)
+{
+	struct granule *g_data;
+	struct granule *g_rd;
+	struct granule *g_table_root;
+	struct rd *rd;
+	struct rtt_walk wi;
+	unsigned long s2tte, *s2tt;
+	enum ripas ripas;
+	enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
+	unsigned long ipa_bits;
+	unsigned long ret;
+	int __unused meas_ret;
+	int sl;
+
+	if (!find_lock_two_granules(data_addr,
+				    GRANULE_STATE_DELEGATED,
+				    &g_data,
+				    rd_addr,
+				    GRANULE_STATE_RD,
+				    &g_rd)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+
+	ret = (g_src != NULL) ?
+		validate_data_create(map_addr, rd) :
+		validate_data_create_unknown(map_addr, rd);
+
+	if (ret != RMI_SUCCESS) {
+		goto out_unmap_rd;
+	}
+
+	g_table_root = rd->s2_ctx.g_rtt;
+	sl = realm_rtt_starting_level(rd);
+	ipa_bits = realm_ipa_bits(rd);
+	granule_lock(g_table_root, GRANULE_STATE_RTT);
+	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
+			     map_addr, RTT_PAGE_LEVEL, &wi);
+	if (wi.last_level != RTT_PAGE_LEVEL) {
+		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
+		goto out_unlock_ll_table;
+	}
+
+	s2tt = granule_map(wi.g_llt, SLOT_RTT);
+	s2tte = s2tte_read(&s2tt[wi.index]);
+	if (!s2tte_is_unassigned(s2tte)) {
+		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
+		goto out_unmap_ll_table;
+	}
+
+	ripas = s2tte_get_ripas(s2tte);
+
+	if (g_src != NULL) {
+		bool ns_access_ok;
+		void *data = granule_map(g_data, SLOT_DELEGATED);
+
+		ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
+					      GRANULE_SIZE, data);
+
+		if (!ns_access_ok) {
+			/*
+			 * Some data may be copied before the failure. Zero
+			 * g_data granule as it will remain in delegated state.
+			 */
+			(void)memset(data, 0, GRANULE_SIZE);
+			buffer_unmap(data);
+			ret = RMI_ERROR_INPUT;
+			goto out_unmap_ll_table;
+		}
+
+		data_granule_measure(rd, data, map_addr, flags);
+
+		buffer_unmap(data);
+	}
+
+	new_data_state = GRANULE_STATE_DATA;
+
+	s2tte = (ripas == RMI_EMPTY) ?
+		s2tte_create_assigned_empty(data_addr, RTT_PAGE_LEVEL) :
+		s2tte_create_valid(data_addr, RTT_PAGE_LEVEL);
+
+	s2tte_write(&s2tt[wi.index], s2tte);
+	__granule_get(wi.g_llt);
+
+	ret = RMI_SUCCESS;
+
+out_unmap_ll_table:
+	buffer_unmap(s2tt);
+out_unlock_ll_table:
+	granule_unlock(wi.g_llt);
+out_unmap_rd:
+	buffer_unmap(rd);
+	granule_unlock(g_rd);
+	granule_unlock_transition(g_data, new_data_state);
+	return ret;
+}
+
+unsigned long smc_data_create(unsigned long data_addr,
+			      unsigned long rd_addr,
+			      unsigned long map_addr,
+			      unsigned long src_addr,
+			      unsigned long flags)
+{
+	struct granule *g_src;
+	unsigned long ret;
+
+	if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
+		return RMI_ERROR_INPUT;
+	}
+
+	g_src = find_granule(src_addr);
+	if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	ret = data_create(data_addr, rd_addr, map_addr, g_src, flags);
+
+	return ret;
+}
+
+unsigned long smc_data_create_unknown(unsigned long data_addr,
+				      unsigned long rd_addr,
+				      unsigned long map_addr)
+{
+	return data_create(data_addr, rd_addr, map_addr, NULL, 0);
+}
+
+unsigned long smc_data_destroy(unsigned long rd_addr,
+			       unsigned long map_addr)
+{
+	struct granule *g_data;
+	struct granule *g_rd;
+	struct granule *g_table_root;
+	struct rtt_walk wi;
+	unsigned long data_addr, s2tte, *s2tt;
+	struct rd *rd;
+	unsigned long ipa_bits;
+	unsigned long ret;
+	struct realm_s2_context s2_ctx;
+	bool valid;
+	int sl;
+
+	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
+	if (g_rd == NULL) {
+		return RMI_ERROR_INPUT;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+
+	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
+		buffer_unmap(rd);
+		granule_unlock(g_rd);
+		return RMI_ERROR_INPUT;
+	}
+
+	g_table_root = rd->s2_ctx.g_rtt;
+	sl = realm_rtt_starting_level(rd);
+	ipa_bits = realm_ipa_bits(rd);
+	s2_ctx = rd->s2_ctx;
+	buffer_unmap(rd);
+
+	granule_lock(g_table_root, GRANULE_STATE_RTT);
+	granule_unlock(g_rd);
+
+	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
+				map_addr, RTT_PAGE_LEVEL, &wi);
+	if (wi.last_level != RTT_PAGE_LEVEL) {
+		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
+		goto out_unlock_ll_table;
+	}
+
+	s2tt = granule_map(wi.g_llt, SLOT_RTT);
+	s2tte = s2tte_read(&s2tt[wi.index]);
+
+	valid = s2tte_is_valid(s2tte, RTT_PAGE_LEVEL);
+
+	/*
+	 * Check if either HIPAS=ASSIGNED or map_addr is a
+	 * valid Protected IPA.
+	 */
+	if (!valid && !s2tte_is_assigned(s2tte, RTT_PAGE_LEVEL)) {
+		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
+		goto out_unmap_ll_table;
+	}
+
+	data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
+
+	/*
+	 * We have already established either HIPAS=ASSIGNED or a valid mapping.
+	 * If valid, transition HIPAS to DESTROYED and if HIPAS=ASSIGNED,
+	 * transition to UNASSIGNED.
+	 */
+	s2tte = valid ? s2tte_create_destroyed() :
+			s2tte_create_unassigned(RMI_EMPTY);
+
+	s2tte_write(&s2tt[wi.index], s2tte);
+
+	if (valid) {
+		invalidate_page(&s2_ctx, map_addr);
+	}
+
+	__granule_put(wi.g_llt);
+
+	/*
+	 * Lock the data granule and check the expected state. The correct
+	 * locking order is guaranteed because the granule address was
+	 * obtained from a locked granule by the table walk. This lock must
+	 * be acquired before any state transition to or from
+	 * GRANULE_STATE_DATA can happen for this granule address.
+	 */
+	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
+	assert(g_data != NULL);
+	granule_memzero(g_data, SLOT_DELEGATED);
+	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);
+
+	ret = RMI_SUCCESS;
+
+out_unmap_ll_table:
+	buffer_unmap(s2tt);
+out_unlock_ll_table:
+	granule_unlock(wi.g_llt);
+
+	return ret;
+}
+
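+/*
+ * Update the RIPAS encoded in *s2tte at the given level: table entries are
+ * rejected, a Valid mapping set to EMPTY is downgraded to an assigned-empty
+ * entry (the caller invalidates the TLB), and Unassigned/Assigned entries
+ * have the new RIPAS value merged in.
+ */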
+static bool update_ripas(unsigned long *s2tte, unsigned long level,
+			 enum ripas ripas)
+{
+	if (s2tte_is_table(*s2tte, level)) {
+		return false;
+	}
+
+	if (s2tte_is_valid(*s2tte, level)) {
+		if (ripas == RMI_EMPTY) {
+			unsigned long pa = s2tte_pa(*s2tte, level);
+			*s2tte = s2tte_create_assigned_empty(pa, level);
+		}
+		return true;
+	}
+
+	if (s2tte_is_unassigned(*s2tte) || s2tte_is_assigned(*s2tte, level)) {
+		*s2tte |= s2tte_create_ripas(ripas);
+		return true;
+	}
+
+	return false;
+}
+
+static void ripas_granule_measure(struct rd *rd,
+				  unsigned long ipa,
+				  unsigned long level)
+{
+	struct measurement_desc_ripas measure_desc = {0};
+
+	/* Initialize the measurement descriptor structure */
+	measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
+	measure_desc.len = sizeof(struct measurement_desc_ripas);
+	measure_desc.ipa = ipa;
+	measure_desc.level = level;
+	memcpy(measure_desc.rim,
+	       &rd->measurement[RIM_MEASUREMENT_SLOT],
+	       measurement_get_size(rd->algorithm));
+
+	/*
+	 * Hash the measurement descriptor structure; the result is the
+	 * updated RIM.
+	 */
+	measurement_hash_compute(rd->algorithm,
+				 &measure_desc,
+				 sizeof(measure_desc),
+				 rd->measurement[RIM_MEASUREMENT_SLOT]);
+}
+
+unsigned long smc_rtt_init_ripas(unsigned long rd_addr,
+				 unsigned long map_addr,
+				 unsigned long ulevel)
+{
+	struct granule *g_rd, *g_rtt_root;
+	struct rd *rd;
+	unsigned long ipa_bits;
+	struct rtt_walk wi;
+	unsigned long s2tte, *s2tt;
+	unsigned long ret;
+	long level = (long)ulevel;
+	int sl;
+
+	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
+	if (g_rd == NULL) {
+		return RMI_ERROR_INPUT;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+
+	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
+		buffer_unmap(rd);
+		granule_unlock(g_rd);
+		return RMI_ERROR_REALM;
+	}
+
+	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
+		buffer_unmap(rd);
+		granule_unlock(g_rd);
+		return RMI_ERROR_INPUT;
+	}
+
+	if (!addr_in_par(rd, map_addr)) {
+		buffer_unmap(rd);
+		granule_unlock(g_rd);
+		return RMI_ERROR_INPUT;
+	}
+
+	g_rtt_root = rd->s2_ctx.g_rtt;
+	sl = realm_rtt_starting_level(rd);
+	ipa_bits = realm_ipa_bits(rd);
+
+	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
+	granule_unlock(g_rd);
+
+	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
+				map_addr, level, &wi);
+	if (wi.last_level != level) {
+		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
+		goto out_unlock_llt;
+	}
+
+	s2tt = granule_map(wi.g_llt, SLOT_RTT);
+	s2tte = s2tte_read(&s2tt[wi.index]);
+
+	/* Allowed only for HIPAS=UNASSIGNED */
+	if (s2tte_is_table(s2tte, level) || !s2tte_is_unassigned(s2tte)) {
+		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
+		goto out_unmap_llt;
+	}
+
+	s2tte |= s2tte_create_ripas(RMI_RAM);
+
+	s2tte_write(&s2tt[wi.index], s2tte);
+
+	ripas_granule_measure(rd, map_addr, level);
+
+	ret = RMI_SUCCESS;
+
+out_unmap_llt:
+	buffer_unmap(s2tt);
+out_unlock_llt:
+	buffer_unmap(rd);
+	granule_unlock(wi.g_llt);
+	return ret;
+}
+
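+/*
+ * RMI_RTT_SET_RIPAS applies one chunk of the RIPAS change that the Realm
+ * requested via RSI_IPA_STATE_SET: the target REC records the requested
+ * [start, end) window, and rec->set_ripas.addr tracks the next expected
+ * address, advancing as the Host works through the region.
+ */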
+unsigned long smc_rtt_set_ripas(unsigned long rd_addr,
+				unsigned long rec_addr,
+				unsigned long map_addr,
+				unsigned long ulevel,
+				unsigned long uripas)
+{
+	struct granule *g_rd, *g_rec, *g_rtt_root;
+	struct rec *rec;
+	struct rd *rd;
+	unsigned long map_size, ipa_bits;
+	struct rtt_walk wi;
+	unsigned long s2tte, *s2tt;
+	struct realm_s2_context s2_ctx;
+	long level = (long)ulevel;
+	enum ripas ripas = (enum ripas)uripas;
+	unsigned long ret;
+	bool valid;
+	int sl;
+
+	if (ripas > RMI_RAM) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if (!find_lock_two_granules(rd_addr,
+				   GRANULE_STATE_RD,
+				   &g_rd,
+				   rec_addr,
+				   GRANULE_STATE_REC,
+				   &g_rec)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if (granule_refcount_read_acquire(g_rec) != 0UL) {
+		ret = RMI_ERROR_IN_USE;
+		goto out_unlock_rec_rd;
+	}
+
+	rec = granule_map(g_rec, SLOT_REC);
+
+	if (g_rd != rec->realm_info.g_rd) {
+		ret = RMI_ERROR_REC;
+		goto out_unmap_rec;
+	}
+
+	if (ripas != rec->set_ripas.ripas) {
+		ret = RMI_ERROR_INPUT;
+		goto out_unmap_rec;
+	}
+
+	if (map_addr != rec->set_ripas.addr) {
+		/* Target region is not the next chunk of the requested region */
+		ret = RMI_ERROR_INPUT;
+		goto out_unmap_rec;
+	}
+
+	rd = granule_map(g_rd, SLOT_RD);
+
+	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
+		ret = RMI_ERROR_INPUT;
+		goto out_unmap_rd;
+	}
+
+	map_size = s2tte_map_size(level);
+	if (map_addr + map_size > rec->set_ripas.end) {
+		/* Target region extends beyond the end of the requested region */
+		ret = RMI_ERROR_INPUT;
+		goto out_unmap_rd;
+	}
+
+	g_rtt_root = rd->s2_ctx.g_rtt;
+	sl = realm_rtt_starting_level(rd);
+	ipa_bits = realm_ipa_bits(rd);
+	s2_ctx = rd->s2_ctx;
+
+	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
+
+	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
+				map_addr, level, &wi);
+	if (wi.last_level != level) {
+		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
+		goto out_unlock_llt;
+	}
+
+	s2tt = granule_map(wi.g_llt, SLOT_RTT);
+	s2tte = s2tte_read(&s2tt[wi.index]);
+
+	valid = s2tte_is_valid(s2tte, level);
+
+	if (!update_ripas(&s2tte, level, ripas)) {
+		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
+		goto out_unmap_llt;
+	}
+
+	s2tte_write(&s2tt[wi.index], s2tte);
+
+	if (valid && (ripas == RMI_EMPTY)) {
+		if (level == RTT_PAGE_LEVEL) {
+			invalidate_page(&s2_ctx, map_addr);
+		} else {
+			invalidate_block(&s2_ctx, map_addr);
+		}
+	}
+
+	rec->set_ripas.addr += map_size;
+
+	ret = RMI_SUCCESS;
+
+out_unmap_llt:
+	buffer_unmap(s2tt);
+out_unlock_llt:
+	granule_unlock(wi.g_llt);
+out_unmap_rd:
+	buffer_unmap(rd);
+out_unmap_rec:
+	buffer_unmap(rec);
+out_unlock_rec_rd:
+	granule_unlock(g_rec);
+	granule_unlock(g_rd);
+	return ret;
+}
diff --git a/runtime/rmi/run.c b/runtime/rmi/run.c
new file mode 100644
index 0000000..27ec48e
--- /dev/null
+++ b/runtime/rmi/run.c
@@ -0,0 +1,304 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch.h>
+#include <debug.h>
+#include <esr.h>
+#include <gic.h>
+#include <granule.h>
+#include <inject_exp.h>
+#include <memory_alloc.h>
+#include <psci.h>
+#include <realm.h>
+#include <rec.h>
+#include <rsi-host-call.h>
+#include <smc-handler.h>
+#include <smc-rmi.h>
+#include <smc-rsi.h>
+#include <smc.h>
+#include <timers.h>
+
+static void reset_last_run_info(struct rec *rec)
+{
+	rec->last_run_info.esr = 0UL;
+}
+
+static bool complete_mmio_emulation(struct rec *rec, struct rmi_rec_entry *rec_entry)
+{
+	unsigned long esr = rec->last_run_info.esr;
+	unsigned int rt = esr_srt(esr);
+
+	if ((rec_entry->flags & REC_ENTRY_FLAG_EMUL_MMIO) == 0UL) {
+		return true;
+	}
+
+	if (((esr & ESR_EL2_EC_MASK) != ESR_EL2_EC_DATA_ABORT) ||
+	    !(esr & ESR_EL2_ABORT_ISV_BIT)) {
+		/*
+		 * MMIO emulation is requested but the REC did not exit with
+		 * an emulatable exit.
+		 */
+		return false;
+	}
+
+	/*
+	 * Emulate an MMIO read (unless the load target is xzr).
+	 */
+	if (!esr_is_write(esr) && (rt != 31U)) {
+		unsigned long val;
+
+		val = rec_entry->gprs[0] & access_mask(esr);
+
+		if (esr_sign_extend(esr)) {
+			unsigned int bit_count = access_len(esr) * 8U;
+			unsigned long mask = 1UL << (bit_count - 1U);
+
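+			/*
+			 * Sign-extend with the xor-and-subtract identity,
+			 * e.g. for a 1-byte load of 0x80: mask = 0x80 and
+			 * (0x80 ^ 0x80) - 0x80 = 0xffffffffffffff80.
+			 */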
+			val = (val ^ mask) - mask;
+			if (!esr_sixty_four(esr)) {
+				val &= (1UL << 32U) - 1UL;
+			}
+		}
+
+		rec->regs[rt] = val;
+	}
+
+	rec->pc = rec->pc + 4UL;
+	return true;
+}
+
+static void complete_set_ripas(struct rec *rec)
+{
+	if (rec->set_ripas.start != rec->set_ripas.end) {
+		/* Pending request from Realm */
+		rec->regs[0] = RSI_SUCCESS;
+		rec->regs[1] = rec->set_ripas.addr;
+
+		rec->set_ripas.start = 0UL;
+		rec->set_ripas.end = 0UL;
+	}
+}
+
+static bool complete_sea_insertion(struct rec *rec, struct rmi_rec_entry *rec_entry)
+{
+	unsigned long esr = rec->last_run_info.esr;
+	unsigned long fipa;
+	unsigned long hpfar = rec->last_run_info.hpfar;
+
+	if ((rec_entry->flags & REC_ENTRY_FLAG_INJECT_SEA) == 0UL) {
+		return true;
+	}
+
+	if ((esr & ESR_EL2_EC_MASK) != ESR_EL2_EC_DATA_ABORT) {
+		return false;
+	}
+
+	fipa = (hpfar & HPFAR_EL2_FIPA_MASK) << HPFAR_EL2_FIPA_OFFSET;
+	if (addr_in_rec_par(rec, fipa)) {
+		return false;
+	}
+
+	inject_sync_idabort_rec(rec, ESR_EL2_ABORT_FSC_SEA);
+	return true;
+}
+
+static void complete_sysreg_emulation(struct rec *rec, struct rmi_rec_entry *rec_entry)
+{
+	unsigned long esr = rec->last_run_info.esr;
+	unsigned int rt = esr_sysreg_rt(esr);
+
+	if ((esr & ESR_EL2_EC_MASK) != ESR_EL2_EC_SYSREG) {
+		return;
+	}
+
+	if (ESR_EL2_SYSREG_IS_WRITE(esr)) {
+		return;
+	}
+
+	/* Handle xzr */
+	if (rt != 31U) {
+		rec->regs[rt] = rec_entry->gprs[0];
+	}
+}
+
+static void complete_hvc_exit(struct rec *rec, struct rmi_rec_entry *rec_entry)
+{
+	unsigned long esr = rec->last_run_info.esr;
+	unsigned int i;
+
+	if ((esr & ESR_EL2_EC_MASK) != ESR_EL2_EC_HVC) {
+		return;
+	}
+
+	for (i = 0U; i < REC_EXIT_NR_GPRS; i++) {
+		rec->regs[i] = rec_entry->gprs[i];
+	}
+}
+
+static bool complete_host_call(struct rec *rec, struct rmi_rec_run *rec_run)
+{
+	struct rsi_walk_result walk_result;
+
+	if (!rec->host_call) {
+		return true;
+	}
+
+	walk_result = complete_rsi_host_call(rec, &rec_run->entry);
+
+	if (walk_result.abort) {
+		emulate_stage2_data_abort(rec, &rec_run->exit, walk_result.rtt_level);
+		return false;
+	}
+
+	rec->host_call = false;
+	return true;
+}
+
+unsigned long smc_rec_enter(unsigned long rec_addr,
+			    unsigned long rec_run_addr)
+{
+	struct granule *g_rec;
+	struct granule *g_run;
+	struct rec *rec;
+	struct rd *rd;
+	struct rmi_rec_run rec_run;
+	unsigned long realm_state, ret;
+	bool success;
+
+	/*
+	 * The content of `rec_run.exit` shall be returned to the host.
+	 * Zero the structure to avoid leaking the contents of the RMM's
+	 * stack.
+	 */
+	(void)memset(&rec_run.exit, 0, sizeof(struct rmi_rec_exit));
+
+	g_run = find_granule(rec_run_addr);
+	if ((g_run == NULL) || (g_run->state != GRANULE_STATE_NS)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	/* For a REC to be runnable, it should be unused (refcount = 0) */
+	g_rec = find_lock_unused_granule(rec_addr, GRANULE_STATE_REC);
+	if (ptr_is_err(g_rec)) {
+		return (unsigned long)ptr_status(g_rec);
+	}
+
+	/*
+	 * Increment refcount. REC can have lock-free access, thus atomic access
+	 * is required. Also, since the granule is only used for refcount
+	 * update, only an atomic operation will suffice and release/acquire
+	 * semantics are not required.
+	 */
+	atomic_granule_get(g_rec);
+
+	/* Unlock the granule before switching to realm world. */
+	granule_unlock(g_rec);
+
+	success = ns_buffer_read(SLOT_NS, g_run, 0U,
+				 sizeof(struct rmi_rec_entry), &rec_run.entry);
+
+	if (!success) {
+		/*
+		 * Decrement the refcount. The REC has lock-free access, thus
+		 * atomic and release semantics are required.
+		 */
+		atomic_granule_put_release(g_rec);
+		return RMI_ERROR_INPUT;
+	}
+
+	rec = granule_map(g_rec, SLOT_REC);
+
+	rd = granule_map(rec->realm_info.g_rd, SLOT_RD);
+	realm_state = get_rd_state_unlocked(rd);
+	buffer_unmap(rd);
+
+	switch (realm_state) {
+	case REALM_STATE_NEW:
+		ret = pack_return_code(RMI_ERROR_REALM, 0U);
+		goto out_unmap_buffers;
+		break;
+	case REALM_STATE_ACTIVE:
+		break;
+	case REALM_STATE_SYSTEM_OFF:
+		ret = pack_return_code(RMI_ERROR_REALM, 1U);
+		goto out_unmap_buffers;
+		break;
+	default:
+		assert(false);
+		break;
+	}
+
+	if (!rec->runnable) {
+		ret = RMI_ERROR_REC;
+		goto out_unmap_buffers;
+	}
+
+	/* REC with pending PSCI command is not schedulable */
+	if (rec->psci_info.pending) {
+		ret = RMI_ERROR_REC;
+		goto out_unmap_buffers;
+	}
+
+	/*
+	 * Check GIC state after checking other conditions but before doing
+	 * anything which may have side effects.
+	 */
+	gic_copy_state_from_ns(&rec->sysregs.gicstate, &rec_run.entry);
+	if (!gic_validate_state(&rec->sysregs.gicstate)) {
+		ret = RMI_ERROR_REC;
+		goto out_unmap_buffers;
+	}
+
+	if (!complete_mmio_emulation(rec, &rec_run.entry)) {
+		ret = RMI_ERROR_REC;
+		goto out_unmap_buffers;
+	}
+
+	if (!complete_sea_insertion(rec, &rec_run.entry)) {
+		ret = RMI_ERROR_REC;
+		goto out_unmap_buffers;
+	}
+
+	complete_set_ripas(rec);
+	complete_sysreg_emulation(rec, &rec_run.entry);
+	complete_hvc_exit(rec, &rec_run.entry);
+
+	if (!complete_host_call(rec, &rec_run)) {
+		ret = RMI_SUCCESS;
+		goto out_unmap_buffers;
+	}
+
+	reset_last_run_info(rec);
+
+	rec->sysregs.hcr_el2 = rec->common_sysregs.hcr_el2;
+	if ((rec_run.entry.flags & REC_ENTRY_FLAG_TRAP_WFI) != 0UL) {
+		rec->sysregs.hcr_el2 |= HCR_TWI;
+	}
+	if ((rec_run.entry.flags & REC_ENTRY_FLAG_TRAP_WFE) != 0UL) {
+		rec->sysregs.hcr_el2 |= HCR_TWE;
+	}
+
+	ret = RMI_SUCCESS;
+
+	rec_run_loop(rec, &rec_run.exit);
+
+	gic_copy_state_to_ns(&rec->sysregs.gicstate, &rec_run.exit);
+
+out_unmap_buffers:
+	buffer_unmap(rec);
+
+	if (ret == RMI_SUCCESS) {
+		if (!ns_buffer_write(SLOT_NS, g_run,
+				     offsetof(struct rmi_rec_run, exit),
+				     sizeof(struct rmi_rec_exit), &rec_run.exit)) {
+			ret = RMI_ERROR_INPUT;
+		}
+	}
+
+	atomic_granule_put_release(g_rec);
+
+	return ret;
+}
diff --git a/runtime/rmi/system.c b/runtime/rmi/system.c
new file mode 100644
index 0000000..24e53ef
--- /dev/null
+++ b/runtime/rmi/system.c
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+#include <assert.h>
+#include <debug.h>
+#include <smc-handler.h>
+#include <smc-rmi.h>
+
+COMPILER_ASSERT(RMI_ABI_VERSION_MAJOR <= 0x7FFF);
+COMPILER_ASSERT(RMI_ABI_VERSION_MINOR <= 0xFFFF);
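+
+/*
+ * The asserts above match the version encoding, which packs the major
+ * revision into bits [30:16] and the minor revision into bits [15:0]
+ * of RMI_ABI_VERSION.
+ */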
+
+unsigned long smc_version(void)
+{
+	return RMI_ABI_VERSION;
+}
diff --git a/runtime/rsi/config.c b/runtime/rsi/config.c
new file mode 100644
index 0000000..38a6446
--- /dev/null
+++ b/runtime/rsi/config.c
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <granule.h>
+#include <realm.h>
+#include <rsi-config.h>
+#include <rsi-walk.h>
+#include <smc-rsi.h>
+
+struct rsi_config_result handle_rsi_realm_config(struct rec *rec)
+{
+	struct rsi_config_result res = { 0 };
+	unsigned long ipa = rec->regs[1];
+	struct rd *rd;
+	enum s2_walk_status walk_status;
+	struct s2_walk_result walk_res;
+	struct granule *gr;
+	struct rsi_realm_config *config;
+
+	if (!GRANULE_ALIGNED(ipa) || !addr_in_rec_par(rec, ipa)) {
+		res.smc_res.x[0] = RSI_ERROR_INPUT;
+		return res;
+	}
+
+	rd = granule_map(rec->realm_info.g_rd, SLOT_RD);
+
+	walk_status = realm_ipa_to_pa(rd, ipa, &walk_res);
+
+	if (walk_status == WALK_FAIL) {
+		if (s2_walk_result_match_ripas(&walk_res, RMI_EMPTY)) {
+			res.smc_res.x[0] = RSI_ERROR_INPUT;
+		} else {
+			/* Exit to Host */
+			res.walk_result.abort = true;
+			res.walk_result.rtt_level = walk_res.rtt_level;
+		}
+		goto out_unmap_rd;
+	}
+
+	if (walk_status == WALK_INVALID_PARAMS) {
+		/* Return error to Realm */
+		res.smc_res.x[0] = RSI_ERROR_INPUT;
+		goto out_unmap_rd;
+	}
+
+	/* Map Realm data granule to RMM address space */
+	gr = find_granule(walk_res.pa);
+	config = (struct rsi_realm_config *)granule_map(gr, SLOT_RSI_CALL);
+
+	/* Populate config structure */
+	config->ipa_width = rec->realm_info.ipa_bits;
+
+	/* Unmap Realm data granule */
+	buffer_unmap(config);
+
+	/* Unlock last level RTT */
+	granule_unlock(walk_res.llt);
+
+	/* Write output values */
+	res.smc_res.x[0] = RSI_SUCCESS;
+
+out_unmap_rd:
+	buffer_unmap(rd);
+	return res;
+}
diff --git a/runtime/rsi/host_call.c b/runtime/rsi/host_call.c
new file mode 100644
index 0000000..3f70146
--- /dev/null
+++ b/runtime/rsi/host_call.c
@@ -0,0 +1,144 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <buffer.h>
+#include <granule.h>
+#include <realm.h>
+#include <rsi-host-call.h>
+#include <smc-rsi.h>
+#include <status.h>
+#include <string.h>
+
+/*
+ * If the RIPAS of the target IPA is EMPTY then the return value is
+ * RSI_ERROR_INPUT.
+ *
+ * If the RTT walk fails then:
+ *   - @rsi_walk_result.abort is true and @rsi_walk_result.rtt_level is the
+ *     last level reached by the walk.
+ *   - The return value is RSI_SUCCESS.
+ *
+ * If the RTT walk succeeds then:
+ *   - If @rec_exit is not NULL and @rec_entry is NULL, the host call
+ *     arguments are copied from the host call data structure (in Realm
+ *     memory) to @rec_exit.
+ *   - If @rec_exit is NULL and @rec_entry is not NULL, the host call
+ *     results are copied to the host call data structure (in Realm memory).
+ *   - The return value is RSI_SUCCESS.
+ */
+static unsigned int do_host_call(struct rec *rec,
+				 struct rmi_rec_exit *rec_exit,
+				 struct rmi_rec_entry *rec_entry,
+				 struct rsi_walk_result *rsi_walk_result)
+{
+	enum s2_walk_status walk_status;
+	struct s2_walk_result walk_result;
+	unsigned long ipa = rec->regs[1];
+	unsigned long page_ipa;
+	struct rd *rd;
+	struct granule *gr;
+	unsigned char *data;
+	struct rsi_host_call *host_call;
+	unsigned int i;
+	unsigned int ret = RSI_SUCCESS;
+
+	assert(addr_in_rec_par(rec, ipa));
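+	/* At least one of @rec_exit and @rec_entry must be provided */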
+	assert(((unsigned long)rec_entry | (unsigned long)rec_exit) != 0UL);
+
+	rd = granule_map(rec->realm_info.g_rd, SLOT_RD);
+
+	page_ipa = ipa & GRANULE_MASK;
+	walk_status = realm_ipa_to_pa(rd, page_ipa, &walk_result);
+
+	switch (walk_status) {
+	case WALK_SUCCESS:
+		break;
+	case WALK_FAIL:
+		if (s2_walk_result_match_ripas(&walk_result, RMI_EMPTY)) {
+			ret = RSI_ERROR_INPUT;
+		} else {
+			rsi_walk_result->abort = true;
+			rsi_walk_result->rtt_level = walk_result.rtt_level;
+		}
+		goto out;
+	case WALK_INVALID_PARAMS:
+		assert(false);
+		break;
+	}
+
+	/* Map Realm data granule to RMM address space */
+	gr = find_granule(walk_result.pa);
+	data = (unsigned char *)granule_map(gr, SLOT_RSI_CALL);
+	host_call = (struct rsi_host_call *)(data + (ipa - page_ipa));
+
+	if (rec_exit != NULL) {
+		/* Copy host call arguments to REC exit data structure */
+		rec_exit->imm = host_call->imm;
+		for (i = 0U; i < RSI_HOST_CALL_NR_GPRS; i++) {
+			rec_exit->gprs[i] = host_call->gprs[i];
+		}
+	}
+
+	if (rec_entry != NULL) {
+		/* Copy host call results to host call data structure */
+		for (i = 0U; i < RSI_HOST_CALL_NR_GPRS; i++) {
+			host_call->gprs[i] = rec_entry->gprs[i];
+		}
+	}
+
+	/* Unmap Realm data granule */
+	buffer_unmap(data);
+
+	/* Unlock last level RTT */
+	granule_unlock(walk_result.llt);
+
+out:
+	buffer_unmap(rd);
+	return ret;
+}
+
+struct rsi_host_call_result handle_rsi_host_call(struct rec *rec,
+						 struct rmi_rec_exit *rec_exit)
+{
+	struct rsi_host_call_result res = { { false, 0UL } };
+	unsigned long ipa = rec->regs[1];
+
+	if (!ALIGNED(ipa, sizeof(struct rsi_host_call))) {
+		res.smc_result = RSI_ERROR_INPUT;
+		return res;
+	}
+
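+	/*
+	 * The whole rsi_host_call structure must fit within a single
+	 * granule, so that one IPA-to-PA walk and a single mapping cover
+	 * the entire structure.
+	 */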
+	if ((ipa / GRANULE_SIZE) !=
+		((ipa + sizeof(struct rsi_host_call) - 1UL) / GRANULE_SIZE)) {
+		res.smc_result = RSI_ERROR_INPUT;
+		return res;
+	}
+
+	if (!addr_in_rec_par(rec, ipa)) {
+		res.smc_result = RSI_ERROR_INPUT;
+		return res;
+	}
+
+	res.smc_result = do_host_call(rec, rec_exit, NULL, &res.walk_result);
+
+	return res;
+}
+
+struct rsi_walk_result complete_rsi_host_call(struct rec *rec,
+					      struct rmi_rec_entry *rec_entry)
+{
+	struct rsi_walk_result res = { false, 0UL };
+
+	/*
+	 * Walk the stage 2 RTTs and copy the host call results provided by
+	 * the NS Host into the host call data structure. It is possible
+	 * for the RIPAS of the IPA to be EMPTY, in which case the call
+	 * returns RSI_ERROR_INPUT. Even then we return RSI_SUCCESS to the
+	 * Realm, which may subsequently take an abort on accessing the IPA
+	 * (depending on the RIPAS of the IPA at that time). The Realm can
+	 * control this situation and should avoid it.
+	 */
+	(void)do_host_call(rec, NULL, rec_entry, &res);
+
+	return res;
+}
diff --git a/runtime/rsi/logger.c b/runtime/rsi/logger.c
new file mode 100644
index 0000000..03b41b5
--- /dev/null
+++ b/runtime/rsi/logger.c
@@ -0,0 +1,134 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <psci.h>
+#include <rsi-logger.h>
+#include <smc-rsi.h>
+#include <utils_def.h>
+
+/* RMI handler uses 29 chars for function name */
+#define	MAX_NAME_LEN	29U
+
+/* 5 64-bit parameters separated by space + 1 trailing space */
+#define PARAMS_STR_LEN	(5U * sizeof("0123456789ABCDEF") + 1U)
+
+#define	MAX_STATUS_LEN	sizeof("{RSI_ERROR_INPUT}")
+
+#define	BUFFER_SIZE	(MAX_NAME_LEN + PARAMS_STR_LEN +	\
+			sizeof("> ") - 1U +		\
+			MAX_STATUS_LEN)
+
+#define RSI_FUNCTION(id) \
+	[SMC_RSI_##id - SMC_RSI_ABI_VERSION] = #id
+
+static const char *rsi_logger[] = {
+	RSI_FUNCTION(ABI_VERSION),		/* 0xC4000190 */
+	RSI_FUNCTION(MEASUREMENT_READ),		/* 0xC4000192 */
+	RSI_FUNCTION(MEASUREMENT_EXTEND),	/* 0xC4000193 */
+	RSI_FUNCTION(ATTEST_TOKEN_INIT),	/* 0xC4000194 */
+	RSI_FUNCTION(ATTEST_TOKEN_CONTINUE),	/* 0xC4000195 */
+	RSI_FUNCTION(REALM_CONFIG),		/* 0xC4000196 */
+	RSI_FUNCTION(IPA_STATE_SET),		/* 0xC4000197 */
+	RSI_FUNCTION(IPA_STATE_GET),		/* 0xC4000198 */
+	RSI_FUNCTION(HOST_CALL)			/* 0xC4000199 */
+};
+
+#define RSI_STATUS_HANDLER(id) [id] = #id
+
+const char *rsi_status_handler[] = {
+	RSI_STATUS_HANDLER(RSI_SUCCESS),
+	RSI_STATUS_HANDLER(RSI_ERROR_INPUT),
+	RSI_STATUS_HANDLER(RSI_ERROR_STATE),
+	RSI_STATUS_HANDLER(RSI_INCOMPLETE)
+};
+
+COMPILER_ASSERT(ARRAY_LEN(rsi_status_handler) == RSI_ERROR_COUNT);
+
+static int print_entry(unsigned int id, unsigned long args[5],
+		       char *buf, size_t len)
+{
+	char name[sizeof("SMC_RSI_ATTEST_TOKEN_CONTINUE")];
+	int cnt __unused;
+
+	switch (id) {
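+	/*
+	 * The contiguous RSI FIDs are grouped below using the GCC/Clang
+	 * case-range extension.
+	 */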
+	case SMC_RSI_ABI_VERSION ... SMC_RSI_HOST_CALL:
+
+		if (rsi_logger[id - SMC_RSI_ABI_VERSION] != NULL) {
+			cnt = snprintf(name, sizeof(name), "%s%s", "SMC_RSI_",
+			       rsi_logger[id - SMC_RSI_ABI_VERSION]);
+		} else {
+			/* Handle gaps in the RSI command numbering */
+			cnt = snprintf(name, sizeof(name), "%s%08x", "SMC_RSI_", id);
+		}
+
+		break;
+
+	/* SMC32 PSCI calls */
+	case SMC32_PSCI_FID_MIN ... SMC32_PSCI_FID_MAX:
+		FALLTHROUGH;
+	case SMC64_PSCI_FID_MIN ... SMC64_PSCI_FID_MAX:
+		cnt = snprintf(name, sizeof(name), "%s%08x", "PSCI_", id);
+		break;
+
+	/* Other SMC calls */
+	default:
+		cnt = snprintf(name, sizeof(name), "%s%08x", "SMC_", id);
+		break;
+	}
+
+	assert((cnt > 0) && (cnt < sizeof(name)));
+
+	return snprintf(buf, len, "%-29s %8lx %8lx %8lx %8lx %8lx ",
+			name, args[0], args[1], args[2], args[3], args[4]);
+}
+
+static int print_status(char *buf, size_t len, unsigned long res)
+{
+	return_code_t rc = unpack_return_code(res);
+
+	if ((unsigned long)rc.status >= RSI_ERROR_COUNT) {
+		return snprintf(buf, len, "> %lx", res);
+	}
+
+	return snprintf(buf, len, "> %s",
+			rsi_status_handler[rc.status]);
+}
+
+static int print_code(char *buf, size_t len, unsigned long res)
+{
+	return snprintf(buf, len, "> %lx", res);
+}
+
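+/*
+ * As an illustration (whitespace condensed), a successful
+ * SMC_RSI_IPA_STATE_GET would be logged along the lines of:
+ *
+ *	SMC_RSI_IPA_STATE_GET 8000 0 0 0 0 > RSI_SUCCESS
+ *
+ * i.e. the function name left-justified in a MAX_NAME_LEN-wide field,
+ * the five arguments in hex, then the unpacked status or result code.
+ */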
+void rsi_log_on_exit(unsigned int function_id, unsigned long args[5],
+		     unsigned long res, bool exit_to_rec)
+{
+	char buffer[BUFFER_SIZE];
+	char *buf_ptr = buffer;
+	size_t buf_len = sizeof(buffer);
+	int cnt = print_entry(function_id, args, buf_ptr, buf_len);
+
+	assert((cnt > 0) && (cnt < buf_len));
+
+	buf_ptr += cnt;
+	buf_len -= cnt;
+
+	/* Print result when execution continues in REC */
+	if (exit_to_rec) {
+		if ((function_id >= SMC_RSI_MEASUREMENT_READ) &&
+		    (function_id <= SMC_RSI_HOST_CALL)) {
+			/* Print status */
+			cnt = print_status(buf_ptr, buf_len, res);
+		} else {
+			/* Print result code */
+			cnt = print_code(buf_ptr, buf_len, res);
+		}
+
+		assert((cnt > 0) && (cnt < buf_len));
+	}
+
+	rmm_log("%s\n", buffer);
+}
diff --git a/runtime/rsi/memory.c b/runtime/rsi/memory.c
new file mode 100644
index 0000000..c59272b
--- /dev/null
+++ b/runtime/rsi/memory.c
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <realm.h>
+#include <ripas.h>
+#include <rsi-memory.h>
+#include <smc-rsi.h>
+#include <status.h>
+
+bool handle_rsi_ipa_state_set(struct rec *rec, struct rmi_rec_exit *rec_exit)
+{
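+	/*
+	 * X1: base of the target IPA region
+	 * X2: size of the target IPA region
+	 * X3: RIPAS value
+	 *
+	 * Returning 'true' completes the call and returns to the Realm;
+	 * returning 'false' exits to the Host with the RIPAS change
+	 * request described in 'rec_exit'.
+	 */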
+	unsigned long start = rec->regs[1];
+	unsigned long size = rec->regs[2];
+	unsigned long end = start + size;
+	enum ripas ripas = (enum ripas)rec->regs[3];
+
+	if (ripas > RMI_RAM) {
+		return true;
+	}
+
+	if (!GRANULE_ALIGNED(start)) {
+		return true;
+	}
+
+	if (!GRANULE_ALIGNED(size)) {
+		return true;
+	}
+
+	if (end <= start) {
+		/* Size is zero, or range overflows */
+		return true;
+	}
+
+	if (!region_in_rec_par(rec, start, end)) {
+		return true;
+	}
+
+	rec->set_ripas.start = start;
+	rec->set_ripas.end = end;
+	rec->set_ripas.addr = start;
+	rec->set_ripas.ripas = ripas;
+
+	rec_exit->exit_reason = RMI_EXIT_RIPAS_CHANGE;
+	rec_exit->ripas_base = start;
+	rec_exit->ripas_size = size;
+	rec_exit->ripas_value = (unsigned int)ripas;
+
+	return false;
+}
+
+rsi_status_t handle_rsi_ipa_state_get(struct rec *rec, unsigned long ipa,
+				      enum ripas *ripas_ptr)
+{
+	bool s2tte_destroyed;
+
+	if (!GRANULE_ALIGNED(ipa)) {
+		return RSI_ERROR_INPUT;
+	}
+
+	if (!addr_in_rec_par(rec, ipa)) {
+		return RSI_ERROR_INPUT;
+	}
+
+	realm_ipa_get_ripas(rec, ipa, ripas_ptr, &s2tte_destroyed);
+	if (s2tte_destroyed == true) {
+		/* TODO: handle destroyed state appropriately */
+		return RSI_ERROR_INPUT;
+	}
+
+	return RSI_SUCCESS;
+}
diff --git a/runtime/rsi/psci.c b/runtime/rsi/psci.c
new file mode 100644
index 0000000..737fdad
--- /dev/null
+++ b/runtime/rsi/psci.c
@@ -0,0 +1,369 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <granule.h>
+#include <psci.h>
+#include <realm.h>
+#include <rec.h>
+#include <smc-rmi.h>
+#include <smc.h>
+#include <stdint.h>
+
+static struct psci_result psci_version(struct rec *rec)
+{
+	struct psci_result result = { 0 };
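+	/*
+	 * PSCI_VERSION encodes the major version in bits[31:16] and the
+	 * minor version in bits[15:0]; we report PSCI 1.1.
+	 */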
+	unsigned int version_1_1 = (1U << 16) | 1U;
+
+	result.smc_res.x[0] = (unsigned long)version_1_1;
+	return result;
+}
+
+static struct psci_result psci_cpu_suspend(struct rec *rec,
+					  unsigned long entry_point_address,
+					  unsigned long context_id)
+{
+	struct psci_result result = { 0 };
+
+	/*
+	 * We treat all target power states as suspend requests, so all we
+	 * need to do is inform the NS hypervisor and we can ignore all the
+	 * parameters.
+	 */
+	result.hvc_forward.forward_psci_call = true;
+
+	result.smc_res.x[0] = PSCI_RETURN_SUCCESS;
+	return result;
+}
+
+static struct psci_result psci_cpu_off(struct rec *rec)
+{
+	struct psci_result result = { 0 };
+
+	result.hvc_forward.forward_psci_call = true;
+
+	/*
+	 * It is safe to set this flag without holding a lock on the REC
+	 * and without explicit memory barriers or ordering operations:
+	 * a REC can only be executing on one PE at any given time, and
+	 * we are in that execution context. We also hold a reference
+	 * count on the REC at this point, which is dropped and
+	 * re-evaluated with proper barriers before any other CPU can
+	 * evaluate the runnable field after this change.
+	 */
+	rec->runnable = false;
+
+	result.smc_res.x[0] = PSCI_RETURN_SUCCESS;
+	return result;
+}
+
+static void psci_reset_rec(struct rec *rec, unsigned long caller_sctlr_el1)
+{
+	/* Set execution level to EL1 (AArch64) and mask exceptions */
+	rec->pstate = SPSR_EL2_MODE_EL1h |
+		      SPSR_EL2_nRW_AARCH64 |
+		      SPSR_EL2_F_BIT |
+		      SPSR_EL2_I_BIT |
+		      SPSR_EL2_A_BIT |
+		      SPSR_EL2_D_BIT;
+
+	/* Disable stage 1 MMU and caches */
+	rec->sysregs.sctlr_el1 = SCTLR_EL1_FLAGS;
+
+	/* Set the endianness of the target to that of the caller */
+	rec->sysregs.sctlr_el1 |= caller_sctlr_el1 & SCTLR_EL1_EE;
+}
+
+static unsigned long rd_map_read_rec_count(struct granule *g_rd)
+{
+	unsigned long rec_count;
+	struct rd *rd = granule_map(g_rd, SLOT_RD);
+
+	rec_count = get_rd_rec_count_unlocked(rd);
+	buffer_unmap(rd);
+	return rec_count;
+}
+
+static struct psci_result psci_cpu_on(struct rec *rec,
+				      unsigned long target_cpu,
+				      unsigned long entry_point_address,
+				      unsigned long context_id)
+{
+	struct psci_result result = { 0 };
+	unsigned long target_rec_idx;
+
+	/* Check that entry_point_address is a Protected Realm Address */
+	if (!addr_in_rec_par(rec, entry_point_address)) {
+		result.smc_res.x[0] = PSCI_RETURN_INVALID_ADDRESS;
+		return result;
+	}
+
+	/* Get REC index from MPIDR */
+	target_rec_idx = mpidr_to_rec_idx(target_cpu);
+
+	/*
+	 * Check that the target_cpu is a valid value.
+	 * Note that the RMM enforces that RECs are created with
+	 * consecutively increasing indexes starting from zero.
+	 */
+	if (target_rec_idx >= rd_map_read_rec_count(rec->realm_info.g_rd)) {
+		result.smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS;
+		return result;
+	}
+
+	/* Check if we're trying to turn ourselves on */
+	if (target_rec_idx == rec->rec_idx) {
+		result.smc_res.x[0] = PSCI_RETURN_ALREADY_ON;
+		return result;
+	}
+
+	rec->psci_info.pending = true;
+
+	result.hvc_forward.forward_psci_call = true;
+	result.hvc_forward.x1 = target_cpu;
+	return result;
+}
+
+static struct psci_result psci_affinity_info(struct rec *rec,
+					     unsigned long target_affinity,
+					     unsigned long lowest_affinity_level)
+{
+	struct psci_result result = { 0 };
+	unsigned long target_rec_idx;
+
+	if (lowest_affinity_level != 0UL) {
+		result.smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS;
+		return result;
+	}
+
+	/* Get REC index from MPIDR */
+	target_rec_idx = mpidr_to_rec_idx(target_affinity);
+
+	/*
+	 * Check that the target_affinity is a valid value.
+	 * Note that the RMM enforces that RECs are created with
+	 * consecutively increasing indexes starting from zero.
+	 */
+	if (target_rec_idx >= rd_map_read_rec_count(rec->realm_info.g_rd)) {
+		result.smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS;
+		return result;
+	}
+
+	/* Check if the vCPU targets itself */
+	if (target_rec_idx == rec->rec_idx) {
+		result.smc_res.x[0] = PSCI_AFFINITY_INFO_ON;
+		return result;
+	}
+
+	rec->psci_info.pending = true;
+
+	result.hvc_forward.forward_psci_call = true;
+	result.hvc_forward.x1 = target_affinity;
+	return result;
+}
+
+/*
+ * System off and reboot of a Realm are enforced by the RMM by
+ * preventing execution of any REC after this function has run. Reboot
+ * functionality must be provided by the host hypervisor by creating a
+ * new Realm with the associated attestation, measurements etc.
+ */
+static void system_off_reboot(struct rec *rec)
+{
+	struct rd *rd;
+	struct granule *g_rd = rec->realm_info.g_rd;
+
+	/*
+	 * The RECs (and, consequently, the PSCI calls) run without any
+	 * RMM lock held. Therefore, we cannot cause a deadlock when we acquire
+	 * the rd lock here before we set the Realm's new state.
+	 */
+	granule_lock(g_rd, GRANULE_STATE_RD);
+	rd = granule_map(rec->realm_info.g_rd, SLOT_RD);
+
+	set_rd_state(rd, REALM_STATE_SYSTEM_OFF);
+
+	buffer_unmap(rd);
+	granule_unlock(g_rd);
+
+	/* TODO: Invalidate all stage 2 entries to ensure REC exits */
+}
+
+static struct psci_result psci_system_off(struct rec *rec)
+{
+	struct psci_result result = { 0 };
+
+	system_off_reboot(rec);
+
+	result.hvc_forward.forward_psci_call = true;
+	return result;
+}
+
+static struct psci_result psci_system_reset(struct rec *rec)
+{
+	struct psci_result result = { 0 };
+
+	system_off_reboot(rec);
+
+	result.hvc_forward.forward_psci_call = true;
+	return result;
+}
+
+static struct psci_result psci_features(struct rec *rec,
+				       unsigned int psci_func_id)
+{
+	struct psci_result result = { 0 };
+	unsigned long ret;
+
+	switch (psci_func_id) {
+	case SMC32_PSCI_CPU_SUSPEND:
+	case SMC64_PSCI_CPU_SUSPEND:
+	case SMC32_PSCI_CPU_OFF:
+	case SMC32_PSCI_CPU_ON:
+	case SMC64_PSCI_CPU_ON:
+	case SMC32_PSCI_AFFINITY_INFO:
+	case SMC64_PSCI_AFFINITY_INFO:
+	case SMC32_PSCI_SYSTEM_OFF:
+	case SMC32_PSCI_SYSTEM_RESET:
+	case SMC32_PSCI_FEATURES:
+	case SMCCC_VERSION:
+		ret = 0UL;
+		break;
+	default:
+		ret = PSCI_RETURN_NOT_SUPPORTED;
+	}
+
+	result.smc_res.x[0] = ret;
+	return result;
+}
+
+struct psci_result psci_rsi(struct rec *rec,
+			    unsigned int function_id,
+			    unsigned long arg0,
+			    unsigned long arg1,
+			    unsigned long arg2)
+{
+	struct psci_result result;
+
+	switch (function_id) {
+	case SMC32_PSCI_VERSION:
+		result = psci_version(rec);
+		break;
+	case SMC32_PSCI_CPU_SUSPEND:
+	case SMC64_PSCI_CPU_SUSPEND:
+		result = psci_cpu_suspend(rec, arg0, arg1);
+		break;
+	case SMC32_PSCI_CPU_OFF:
+		result = psci_cpu_off(rec);
+		break;
+	case SMC32_PSCI_CPU_ON:
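+		/*
+		 * Per the SMC calling convention, only the lower 32 bits
+		 * of the arguments are significant for SMC32 calls.
+		 */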
+		arg0 = (unsigned int)arg0;
+		arg1 = (unsigned int)arg1;
+		arg2 = (unsigned int)arg2;
+		FALLTHROUGH;
+	case SMC64_PSCI_CPU_ON:
+		result = psci_cpu_on(rec, arg0, arg1, arg2);
+		break;
+	case SMC32_PSCI_AFFINITY_INFO:
+		arg0 = (unsigned int)arg0;
+		arg1 = (unsigned int)arg1;
+		FALLTHROUGH;
+	case SMC64_PSCI_AFFINITY_INFO:
+		result = psci_affinity_info(rec, arg0, arg1);
+		break;
+	case SMC32_PSCI_SYSTEM_OFF:
+		result = psci_system_off(rec);
+		break;
+	case SMC32_PSCI_SYSTEM_RESET:
+		result = psci_system_reset(rec);
+		break;
+	case SMC32_PSCI_FEATURES:
+		result = psci_features(rec, (unsigned int)arg0);
+		break;
+	default:
+		result.smc_res.x[0] = PSCI_RETURN_NOT_SUPPORTED;
+		result.hvc_forward.forward_psci_call = false;
+		break;
+	}
+
+	return result;
+}
+
+/*
+ * In the following two functions, it is only safe to access the runnable field
+ * on the target_rec once the target_rec is no longer running on another PE and
+ * all writes performed by the other PE as part of smc_rec_enter are also
+ * guaranteed to be observed here, which we know when we read a zero refcount
+ * on the target rec using acquire semantics paired with the release semantics
+ * on the reference count in smc_rec_enter. If we observe a non-zero refcount
+ * it simply means that the target_rec is running and we can return the
+ * corresponding value.
+ */
+static unsigned long complete_psci_cpu_on(struct rec *target_rec,
+					  unsigned long entry_point_address,
+					  unsigned long caller_sctlr_el1)
+{
+	if ((granule_refcount_read_acquire(target_rec->g_rec) != 0UL) ||
+		target_rec->runnable) {
+		return PSCI_RETURN_ALREADY_ON;
+	}
+
+	psci_reset_rec(target_rec, caller_sctlr_el1);
+	target_rec->pc = entry_point_address;
+	target_rec->runnable = true;
+	return PSCI_RETURN_SUCCESS;
+}
+
+static unsigned long complete_psci_affinity_info(struct rec *target_rec)
+{
+	if ((granule_refcount_read_acquire(target_rec->g_rec) != 0UL) ||
+		target_rec->runnable) {
+		return PSCI_AFFINITY_INFO_ON;
+	}
+
+	return PSCI_AFFINITY_INFO_OFF;
+}
+
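+/*
+ * Completes a pending PSCI request (PSCI_CPU_ON or PSCI_AFFINITY_INFO)
+ * that was forwarded to the NS Host. The Host passes in the target REC
+ * it resolved from the MPIDR of the original call; we check that the
+ * target belongs to the same Realm and matches that MPIDR before
+ * writing the PSCI return value into the calling REC's registers.
+ */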
+unsigned long psci_complete_request(struct rec *calling_rec,
+				    struct rec *target_rec)
+{
+	unsigned long ret = PSCI_RETURN_NOT_SUPPORTED;
+	unsigned long mpidr = calling_rec->regs[1];
+
+	if (!calling_rec->psci_info.pending) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if (calling_rec->realm_info.g_rd != target_rec->realm_info.g_rd) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if (mpidr_to_rec_idx(mpidr) != target_rec->rec_idx) {
+		return RMI_ERROR_INPUT;
+	}
+
+	switch (calling_rec->regs[0]) {
+	case SMC32_PSCI_CPU_ON:
+	case SMC64_PSCI_CPU_ON:
+		ret = complete_psci_cpu_on(target_rec,
+					   calling_rec->regs[2],
+					   calling_rec->sysregs.sctlr_el1);
+		break;
+	case SMC32_PSCI_AFFINITY_INFO:
+	case SMC64_PSCI_AFFINITY_INFO:
+		ret = complete_psci_affinity_info(target_rec);
+		break;
+	default:
+		assert(false);
+	}
+
+	calling_rec->regs[0] = ret;
+	calling_rec->regs[1] = 0;
+	calling_rec->regs[2] = 0;
+	calling_rec->regs[3] = 0;
+	calling_rec->psci_info.pending = false;
+
+	return RMI_SUCCESS;
+}
diff --git a/runtime/rsi/realm_attest.c b/runtime/rsi/realm_attest.c
new file mode 100644
index 0000000..a55f32d
--- /dev/null
+++ b/runtime/rsi/realm_attest.c
@@ -0,0 +1,379 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <attestation.h>
+#include <attestation_token.h>
+#include <debug.h>
+#include <granule.h>
+#include <measurement.h>
+#include <realm.h>
+#include <realm_attest.h>
+#include <smc-rsi.h>
+#include <smc.h>
+#include <string.h>
+#include <utils_def.h>
+
+#define MAX_EXTENDED_SIZE		(64U)
+
+/*
+ * Return the Realm Personalization Value.
+ *
+ * Arguments:
+ * rd    - The Realm descriptor.
+ * claim - The structure to return the Realm Personalization Value claim
+ */
+static void get_rpv(struct rd *rd, struct q_useful_buf_c *claim)
+{
+	claim->ptr = (uint8_t *)&(rd->rpv[0]);
+	claim->len = RPV_SIZE;
+}
+
+/*
+ * Save the input parameters in the context for later iterations to check for
+ * consistency.
+ */
+static void save_input_parameters(struct rec *rec)
+{
+	rec->token_sign_ctx.token_ipa = rec->regs[1];
+	(void)memcpy(rec->token_sign_ctx.challenge, &rec->regs[2],
+		     ATTEST_CHALLENGE_SIZE);
+}
+
+/*
+ * Verify that in all the iterations the input parameters are the same
+ * as in the initial call.
+ */
+static bool verify_input_parameters_consistency(struct rec *rec)
+{
+	return rec->token_sign_ctx.token_ipa == rec->regs[1];
+}
+
+/*
+ * Function to continue with the sign operation.
+ * It returns void as the result will be updated in the
+ * struct attest_result passed as argument.
+ */
+static void attest_token_continue_sign_state(struct rec *rec,
+					     struct attest_result *res)
+{
+	/*
+	 * Sign and finish creating the token.
+	 */
+	enum attest_token_err_t ret =
+		attest_realm_token_sign(&(rec->token_sign_ctx.ctx),
+					&(rec->rmm_realm_token));
+
+	if ((ret == ATTEST_TOKEN_ERR_COSE_SIGN_IN_PROGRESS) ||
+		(ret == ATTEST_TOKEN_ERR_SUCCESS)) {
+		/*
+		 * Return to the RSI handler function after each iteration
+		 * to check whether there is anything else to do (e.g. a
+		 * pending IRQ) or whether the next signing iteration can
+		 * be executed.
+		 */
+		res->incomplete = true;
+		res->smc_res.x[0] = RSI_INCOMPLETE;
+
+		/* If this was the last signing cycle */
+		if (ret == ATTEST_TOKEN_ERR_SUCCESS) {
+			rec->token_sign_ctx.state =
+				ATTEST_SIGN_TOKEN_WRITE_IN_PROGRESS;
+		}
+	} else {
+		/* Reached only on failure during token signing */
+		ERROR("FATAL_ERROR: Realm token creation failed\n");
+		panic();
+	}
+}
+
+/*
+ * Function to continue with the token write operation.
+ * It returns void as the result will be updated in the
+ * struct attest_result passed as argument.
+ */
+static void attest_token_continue_write_state(struct rec *rec,
+					      struct attest_result *res)
+{
+	struct rd *rd = NULL;
+	struct granule *gr;
+	uint8_t *realm_att_token;
+	unsigned long realm_att_token_ipa = rec->regs[1];
+	enum s2_walk_status walk_status;
+	struct s2_walk_result walk_res = { 0UL };
+	struct q_useful_buf attest_token_buf;
+	size_t attest_token_len;
+
+	/*
+	 * The refcounts held on the rd and the rec protect them from
+	 * any changes while the REC is running.
+	 */
+	rd = granule_map(rec->realm_info.g_rd, SLOT_RD);
+
+	/*
+	 * Translate the realm granule IPA to a PA. If this returns
+	 * WALK_SUCCESS then the last level page table (llt), which
+	 * holds the realm_att_token_buf mapping, is locked.
+	 */
+	walk_status = realm_ipa_to_pa(rd, realm_att_token_ipa, &walk_res);
+	buffer_unmap(rd);
+
+	/* Walk parameter validity was checked by RSI_ATTESTATION_TOKEN_INIT */
+	assert(walk_status != WALK_INVALID_PARAMS);
+
+	if (walk_status == WALK_FAIL) {
+		if (s2_walk_result_match_ripas(&walk_res, RMI_EMPTY)) {
+			res->smc_res.x[0] = RSI_ERROR_INPUT;
+		} else {
+			/*
+			 * Translation failed, IPA is not mapped. Return to NS host to
+			 * fix the issue.
+			 */
+			res->walk_result.abort = true;
+			res->walk_result.rtt_level = walk_res.rtt_level;
+			res->smc_res.x[0] = RSI_INCOMPLETE;
+		}
+		return;
+	}
+
+	/* Map realm data granule to RMM address space */
+	gr = find_granule(walk_res.pa);
+	realm_att_token = granule_map(gr, SLOT_RSI_CALL);
+
+	attest_token_buf.ptr = realm_att_token;
+	attest_token_buf.len = ATTEST_TOKEN_BUFFER_SIZE;
+
+	attest_token_len = attest_cca_token_create(&attest_token_buf,
+						   &rec->rmm_realm_token);
+
+	/* Unmap realm granule */
+	buffer_unmap(realm_att_token);
+
+	/* Unlock the last level page table (walk_res.llt) */
+	granule_unlock(walk_res.llt);
+
+	/* Write output parameters */
+	if (attest_token_len == 0) {
+		res->smc_res.x[0] = RSI_ERROR_INPUT;
+	} else {
+		res->smc_res.x[0] = RSI_SUCCESS;
+		res->smc_res.x[1] = attest_token_len;
+	}
+
+	/* The signing has either succeeded or failed. Reset the state. */
+	rec->token_sign_ctx.state = ATTEST_SIGN_NOT_STARTED;
+}
+
+unsigned long handle_rsi_attest_token_init(struct rec *rec)
+{
+	struct rd *rd = NULL;
+	unsigned long ret;
+	unsigned long realm_buf_ipa = rec->regs[1];
+	struct q_useful_buf rmm_realm_token_buf = {
+		rec->rmm_realm_token_buf, sizeof(rec->rmm_realm_token_buf)};
+	struct q_useful_buf_c rpv;
+	int att_ret;
+
+	assert(rec != NULL);
+
+	/*
+	 * Calling RSI_ATTESTATION_TOKEN_INIT at any time aborts any ongoing
+	 * operation.
+	 * TODO: This can be moved to attestation lib
+	 */
+	if (rec->token_sign_ctx.state != ATTEST_SIGN_NOT_STARTED) {
+		int restart;
+
+		rec->token_sign_ctx.state = ATTEST_SIGN_NOT_STARTED;
+		restart = attestation_heap_reinit_pe(rec->aux_data.attest_heap_buf,
+						      REC_HEAP_PAGES * SZ_4K);
+		if (restart != 0) {
+			/* There is no provision for this failure so panic */
+			panic();
+		}
+	}
+
+	if (!GRANULE_ALIGNED(realm_buf_ipa)) {
+		return RSI_ERROR_INPUT;
+	}
+
+	/*
+	 * The rd lock is acquired so that the measurement cannot be
+	 * updated simultaneously by another REC.
+	 */
+	granule_lock(rec->realm_info.g_rd, GRANULE_STATE_RD);
+	rd = granule_map(rec->realm_info.g_rd, SLOT_RD);
+	if (!addr_in_par(rd, realm_buf_ipa)) {
+		ret = RSI_ERROR_INPUT;
+		goto out_unmap_rd;
+	}
+
+	/*
+	 * Save the input parameters in the context for later iterations
+	 * to check.
+	 */
+	save_input_parameters(rec);
+
+	get_rpv(rd, &rpv);
+	att_ret = attest_realm_token_create(rd->algorithm, rd->measurement,
+					    MEASUREMENT_SLOT_NR,
+					    &rpv,
+					    &rec->token_sign_ctx,
+					    &rmm_realm_token_buf);
+	if (att_ret != 0) {
+		ERROR("FATAL_ERROR: Realm token creation failed,\n");
+		panic();
+	}
+
+	rec->token_sign_ctx.state = ATTEST_SIGN_IN_PROGRESS;
+	ret = RSI_SUCCESS;
+
+out_unmap_rd:
+	buffer_unmap(rd);
+	granule_unlock(rec->realm_info.g_rd);
+	return ret;
+}
+
+void attest_realm_token_sign_continue_start(void)
+{
+	fpu_save_my_state();
+}
+
+void attest_realm_token_sign_continue_finish(void)
+{
+	fpu_restore_my_state();
+}
+
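+/*
+ * Token generation is driven by the state kept in
+ * rec->token_sign_ctx.state:
+ *
+ *	ATTEST_SIGN_NOT_STARTED
+ *	  -> ATTEST_SIGN_IN_PROGRESS		 (successful TOKEN_INIT)
+ *	  -> ATTEST_SIGN_TOKEN_WRITE_IN_PROGRESS (signing completed)
+ *	  -> ATTEST_SIGN_NOT_STARTED		 (token written, or failure)
+ *
+ * Each TOKEN_CONTINUE call below advances this machine by at most one
+ * step, returning RSI_INCOMPLETE while work remains.
+ */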
+void handle_rsi_attest_token_continue(struct rec *rec,
+				      struct attest_result *res)
+{
+	assert(rec != NULL);
+	assert(res != NULL);
+
+	/* Initialize attest_result */
+	res->incomplete = false;
+	res->walk_result.abort = false;
+
+	if (!verify_input_parameters_consistency(rec)) {
+		res->smc_res.x[0] = RSI_ERROR_INPUT;
+		return;
+	}
+
+	switch (rec->token_sign_ctx.state) {
+	case ATTEST_SIGN_NOT_STARTED:
+		/*
+		 * Before this call the initial attestation token call
+		 * (SMC_RSI_ATTEST_TOKEN_INIT) must have been executed
+		 * successfully.
+		 */
+		res->smc_res.x[0] = RSI_ERROR_STATE;
+		break;
+	case ATTEST_SIGN_IN_PROGRESS:
+		attest_token_continue_sign_state(rec, res);
+		break;
+	case ATTEST_SIGN_TOKEN_WRITE_IN_PROGRESS:
+		attest_token_continue_write_state(rec, res);
+		break;
+	default:
+		/* Any other state is considered an error. */
+		assert(false);
+	}
+}
+
+unsigned long handle_rsi_extend_measurement(struct rec *rec)
+{
+	struct granule *g_rd;
+	struct rd *rd;
+	unsigned long index;
+	unsigned long rd_addr;
+	size_t size;
+	unsigned long ret;
+	void *extend_measurement;
+	unsigned char *current_measurement;
+	int __unused meas_ret;
+
+	/*
+	 * The rd lock is acquired so that the measurement cannot be
+	 * updated simultaneously by another REC.
+	 */
+	rd_addr = granule_addr(rec->realm_info.g_rd);
+	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
+
+	assert(g_rd != NULL);
+
+	rd = granule_map(rec->realm_info.g_rd, SLOT_RD);
+
+	/*
+	 * X1:     index
+	 * X2:     size
+	 * X3-X10: measurement value
+	 */
+	index = rec->regs[1];
+
+	if ((index == RIM_MEASUREMENT_SLOT) ||
+	    (index >= MEASUREMENT_SLOT_NR)) {
+		ret = RSI_ERROR_INPUT;
+		goto out_unmap_rd;
+	}
+
+	size  = rec->regs[2];
+
+	if (size > MAX_EXTENDED_SIZE) {
+		ret = RSI_ERROR_INPUT;
+		goto out_unmap_rd;
+	}
+
+	extend_measurement = &rec->regs[3];
+	current_measurement = rd->measurement[index];
+
+	measurement_extend(rd->algorithm,
+			   current_measurement,
+			   extend_measurement,
+			   size,
+			   current_measurement);
+
+	ret = RSI_SUCCESS;
+
+out_unmap_rd:
+	buffer_unmap(rd);
+	granule_unlock(g_rd);
+	return ret;
+}
+
+unsigned long handle_rsi_read_measurement(struct rec *rec)
+{
+	struct rd *rd;
+	unsigned long idx;
+	size_t measurement_size;
+
+	assert(rec != NULL);
+
+	/* X1: Index */
+	idx = rec->regs[1];
+
+	if (idx >= MEASUREMENT_SLOT_NR) {
+		return RSI_ERROR_INPUT;
+	}
+
+	/*
+	 * The rd lock is acquired so that the measurement cannot be
+	 * updated simultaneously by another REC.
+	 */
+	granule_lock(rec->realm_info.g_rd, GRANULE_STATE_RD);
+	rd = granule_map(rec->realm_info.g_rd, SLOT_RD);
+
+	measurement_size = measurement_get_size(rd->algorithm);
+
+	(void)memcpy(&rec->regs[1], rd->measurement[idx], measurement_size);
+
+	/* Zero-initialize the unused area */
+	if (measurement_size < MAX_MEASUREMENT_SIZE) {
+		(void)memset((char *)(&rec->regs[1]) + measurement_size,
+			     0, MAX_MEASUREMENT_SIZE - measurement_size);
+	}
+
+	buffer_unmap(rd);
+	granule_unlock(rec->realm_info.g_rd);
+
+	return RSI_SUCCESS;
+}
diff --git a/runtime/rsi/realm_ipa_helper.c b/runtime/rsi/realm_ipa_helper.c
new file mode 100644
index 0000000..d6e38e9
--- /dev/null
+++ b/runtime/rsi/realm_ipa_helper.c
@@ -0,0 +1,137 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <buffer.h>
+#include <granule.h>
+#include <realm.h>
+
+/**
+ * Translate a realm granule IPA to PA.
+ *
+ * Parameters:
+ * [in]   rd		    Pointer to realm descriptor granule.
+ * [in]   ipa		    The intermediate physical address of the realm granule.
+ * [in]   s2_walk	    Address of s2_walk_result structure to return:
+ * [out]  s2_walk.pa	    The physical address of the realm granule.
+ * [out]  s2_walk.rtt_level The last level reached by the table walk.
+ * [out]  s2_walk.ripas	    RIPAS of s2tte.
+ * [out]  s2_walk.destroyed 'true', if s2tte has HIPAS=DESTROYED.
+ * [out]  s2_walk.llt	    Pointer to the last level page table which contains
+ *			    the mapping of the granule. If function returns with
+ *			    WALK_SUCCESS then 'llt' must be unlocked by the caller.
+ *			    The lock prevents the realm granule from being
+ *			    destroyed while the RMM is accessing it.
+ * Returns:
+ * WALK_SUCCESS		Translation succeeded.
+ * WALK_INVALID_PARAMS	Parameter 'ipa' is unaligned or is not a Protected IPA.
+ * WALK_FAIL		Mapping is not in the page table. NS Host needs to fix.
+ */
+enum s2_walk_status realm_ipa_to_pa(struct rd *rd,
+				    unsigned long ipa,
+				    struct s2_walk_result *s2_walk)
+{
+	struct granule *g_table_root;
+	struct rtt_walk wi;
+	unsigned long s2tte, *ll_table, offset;
+	enum s2_walk_status walk_status;
+
+	if (!GRANULE_ALIGNED(ipa) || !addr_in_par(rd, ipa)) {
+		return WALK_INVALID_PARAMS;
+	}
+
+	/*
+	 * SW table walk to find the corresponding PA. It handles cases
+	 * where the buffer is mapped at page level or at block level.
+	 *
+	 * Todo:
+	 * - Page mapping is assumed.
+	 */
+	g_table_root = rd->s2_ctx.g_rtt;
+	granule_lock(g_table_root, GRANULE_STATE_RTT);
+	rtt_walk_lock_unlock(g_table_root,
+			     realm_rtt_starting_level(rd),
+			     realm_ipa_bits(rd),
+			     ipa,
+			     RTT_PAGE_LEVEL,
+			     &wi);
+
+	ll_table = granule_map(wi.g_llt, SLOT_RTT);
+
+	/* Must be unlocked by caller */
+	s2_walk->llt = wi.g_llt;
+	s2tte = s2tte_read(&ll_table[wi.index]);
+
+	if (!s2tte_is_valid(s2tte, wi.last_level)) {
+		/*
+		 * This 'tte' has not yet been made valid by the Host.
+		 * Depending on the RIPAS value, the caller needs to
+		 * either emulate a Data Abort back to the Host or
+		 * return an error back to the Realm.
+		 */
+		s2_walk->rtt_level = wi.last_level;
+		if (s2tte_is_destroyed(s2tte)) {
+			s2_walk->destroyed = true;
+		} else {
+			s2_walk->ripas = s2tte_get_ripas(s2tte);
+		}
+		granule_unlock(wi.g_llt);
+		walk_status = WALK_FAIL;
+		goto out_unmap_table;
+	}
+
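+	/*
+	 * Compute the PA: take the output address of the mapping and
+	 * add the offset of the IPA within it. Since 'ipa' is granule
+	 * aligned, the offset is zero for a page-level mapping; for a
+	 * block-level mapping it selects the granule within the block
+	 * (e.g. the low 21 bits of the IPA for a 2MB level 2 block with
+	 * 4KB granules).
+	 */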
+	s2_walk->pa = s2tte_pa(s2tte, wi.last_level);
+	offset = ipa & (s2tte_map_size(wi.last_level) - 1UL);
+	s2_walk->pa += offset;
+	s2_walk->ripas = RMI_RAM;
+
+	walk_status = WALK_SUCCESS;
+
+out_unmap_table:
+	buffer_unmap(ll_table);
+	return walk_status;
+}
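+
+/*
+ * Typical caller pattern (a sketch; see the use in
+ * attest_token_continue_write_state() in runtime/rsi/realm_attest.c):
+ *
+ *	struct s2_walk_result res = { 0UL };
+ *
+ *	if (realm_ipa_to_pa(rd, ipa, &res) == WALK_SUCCESS) {
+ *		... access the granule at res.pa ...
+ *		granule_unlock(res.llt);
+ *	}
+ */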
+
+/*
+ * Get RIPAS of IPA
+ *
+ * Parameters:
+ *	[in]  @rec:		Pointer to the rec
+ *	[in]  @ipa:		IPA for which RIPAS is queried.
+ *	[out] @ripas_ptr:	RIPAS value returned for the IPA
+ *	[out] @s2tte_destroyed: Set to true when s2tte has HIPAS=DESTROYED
+ * Returns:
+ *	None
+ */
+void realm_ipa_get_ripas(struct rec *rec, unsigned long ipa,
+			 enum ripas *ripas_ptr, bool *s2tte_destroyed)
+{
+	unsigned long s2tte, *ll_table;
+	struct rtt_walk wi;
+
+	assert(ripas_ptr != NULL);
+	assert(s2tte_destroyed != NULL);
+	assert(GRANULE_ALIGNED(ipa));
+	assert(addr_in_rec_par(rec, ipa));
+
+	granule_lock(rec->realm_info.g_rtt, GRANULE_STATE_RTT);
+
+	rtt_walk_lock_unlock(rec->realm_info.g_rtt,
+			     rec->realm_info.s2_starting_level,
+			     rec->realm_info.ipa_bits,
+			     ipa, RTT_PAGE_LEVEL, &wi);
+
+	ll_table = granule_map(wi.g_llt, SLOT_RTT);
+	s2tte = s2tte_read(&ll_table[wi.index]);
+
+	if (s2tte_is_destroyed(s2tte)) {
+		*s2tte_destroyed = true;
+	} else {
+		*s2tte_destroyed = false;
+		*ripas_ptr = s2tte_get_ripas(s2tte);
+	}
+
+	buffer_unmap(ll_table);
+	granule_unlock(wi.g_llt);
+}
diff --git a/runtime/rsi/system.c b/runtime/rsi/system.c
new file mode 100644
index 0000000..e13d54b
--- /dev/null
+++ b/runtime/rsi/system.c
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <assert.h>
+#include <smc-rsi.h>
+
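+/*
+ * The checks below assume RSI_ABI_VERSION packs the major version in
+ * the upper halfword and the minor version in the lower halfword;
+ * capping the major at 0x7FFF keeps the packed value a positive
+ * 32-bit quantity.
+ */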
+COMPILER_ASSERT(RSI_ABI_VERSION_MAJOR <= 0x7FFF);
+COMPILER_ASSERT(RSI_ABI_VERSION_MINOR <= 0xFFFF);
+
+unsigned long system_rsi_abi_version(void)
+{
+	return RSI_ABI_VERSION;
+}