Trusted Firmware-A Tests, version 2.0

This is the first public version of the tests for the Trusted
Firmware-A project. Please see the documentation provided in the
source tree for more details.

Change-Id: I6f3452046a1351ac94a71b3525c30a4ca8db7867
Signed-off-by: Sandrine Bailleux <sandrine.bailleux@arm.com>
Co-authored-by: amobal01 <amol.balasokamble@arm.com>
Co-authored-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
Co-authored-by: Asha R <asha.r@arm.com>
Co-authored-by: Chandni Cherukuri <chandni.cherukuri@arm.com>
Co-authored-by: David Cunado <david.cunado@arm.com>
Co-authored-by: Dimitris Papastamos <dimitris.papastamos@arm.com>
Co-authored-by: Douglas Raillard <douglas.raillard@arm.com>
Co-authored-by: dp-arm <dimitris.papastamos@arm.com>
Co-authored-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
Co-authored-by: Jonathan Wright <jonathan.wright@arm.com>
Co-authored-by: Kévin Petit <kevin.petit@arm.com>
Co-authored-by: Roberto Vargas <roberto.vargas@arm.com>
Co-authored-by: Sathees Balya <sathees.balya@arm.com>
Co-authored-by: Shawon Roy <Shawon.Roy@arm.com>
Co-authored-by: Soby Mathew <soby.mathew@arm.com>
Co-authored-by: Thomas Abraham <thomas.abraham@arm.com>
Co-authored-by: Vikram Kanigiri <vikram.kanigiri@arm.com>
Co-authored-by: Yatharth Kochar <yatharth.kochar@arm.com>
diff --git a/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
new file mode 100644
index 0000000..37f3b53
--- /dev/null
+++ b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_AARCH32_H
+#define XLAT_TABLES_AARCH32_H
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+
+#if !defined(PAGE_SIZE)
+#error "PAGE_SIZE is not defined."
+#endif
+
+/*
+ * In AArch32 state, the MMU only supports 4KB page granularity, which means
+ * that the first translation table level is either 1 or 2. Both of them are
+ * allowed to have block and table descriptors. See section G4.5.6 of the
+ * ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+#if PAGE_SIZE != PAGE_SIZE_4KB
+#error "Invalid granule size. AArch32 supports 4KB pages only."
+#endif
+
+#define MIN_LVL_BLOCK_DESC	U(1)
+
+#define XLAT_TABLE_LEVEL_MIN	U(1)
+
+/*
+ * Define the architectural limits of the virtual address space in AArch32
+ * state.
+ *
+ * TTBCR.TxSZ is calculated as 32 minus the width of said address space. The
+ * value of TTBCR.TxSZ must be in the range 0 to 7 [1], which means that the
+ * virtual address space width must be in the range 32 to 25 bits.
+ *
+ * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information, Section G4.6.5
+ */
+#define MIN_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (U(32) - TTBCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (U(32) - TTBCR_TxSZ_MIN))
+
+/*
+ * Here we calculate the initial lookup level from the value of the given
+ * virtual address space size. For a 4 KB page size,
+ * - level 1 supports virtual address spaces of widths 32 to 31 bits;
+ * - level 2 from 30 to 25.
+ *
+ * Wider or narrower address spaces are not supported. As a result, level 3
+ * cannot be used as the initial lookup level with 4 KB granularity.
+ * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information, Section G4.6.5
+ *
+ * For example, for a 31-bit address space (i.e. virt_addr_space_size ==
+ * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
+ * G4-5 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * Note that this macro assumes that the given virtual address space size is
+ * valid. Therefore, the caller is expected to check it is the case using the
+ * CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
+ */
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz)			\
+	(((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ?	\
+	 U(1) : U(2))
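+
+/*
+ * Illustrative expansion (complementing the example above): for a 30-bit
+ * address space (i.e. virt_addr_space_size == 1 << 30), the size is not
+ * greater than (1 << L1_XLAT_ADDRESS_SHIFT), which is also 1 GB with the 4KB
+ * granule, so the macro evaluates to 2 and lookups start at level 2.
+ */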
+
+#endif /* XLAT_TABLES_AARCH32_H */
diff --git a/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
new file mode 100644
index 0000000..91ca8e4
--- /dev/null
+++ b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_AARCH64_H
+#define XLAT_TABLES_AARCH64_H
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+
+#if !defined(PAGE_SIZE)
+#error "PAGE_SIZE is not defined."
+#endif
+
+/*
+ * Encode a Physical Address Space size for its use in TCR_ELx.
+ */
+unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
+
+/*
+ * In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
+ * granularity. For 4 KB granularity, a level 0 table descriptor doesn't
+ * support block translation. For 16 KB, the same applies to levels 0 and 1.
+ * For 64 KB, it applies to level 1. See section D4.3.1 of the ARMv8-A
+ * Architecture Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+#if PAGE_SIZE == PAGE_SIZE_4KB
+# define MIN_LVL_BLOCK_DESC	U(1)
+#elif (PAGE_SIZE == PAGE_SIZE_16KB) || (PAGE_SIZE == PAGE_SIZE_64KB)
+# define MIN_LVL_BLOCK_DESC	U(2)
+#endif
+
+#define XLAT_TABLE_LEVEL_MIN	U(0)
+
+/*
+ * Define the architectural limits of the virtual address space in AArch64
+ * state.
+ *
+ * TCR.TxSZ is calculated as 64 minus the width of said address space.
+ * The value of TCR.TxSZ must be in the range 16 to 39 [1], which means that
+ * the virtual address space width must be in the range 48 to 25 bits.
+ *
+ * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information:
+ * Page 1730: 'Input address size', 'For all translation stages'.
+ */
+#define MIN_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (U(64) - TCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (U(64) - TCR_TxSZ_MIN))
+
+/*
+ * Here we calculate the initial lookup level from the value of the given
+ * virtual address space size. For a 4 KB page size,
+ * - level 0 supports virtual address spaces of widths 48 to 40 bits;
+ * - level 1 from 39 to 31;
+ * - level 2 from 30 to 25.
+ *
+ * Wider or narrower address spaces are not supported. As a result, level 3
+ * cannot be used as initial lookup level with 4 KB granularity. See section
+ * D4.2.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information.
+ *
+ * For example, for a 35-bit address space (i.e. virt_addr_space_size ==
+ * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
+ * D4-11 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * Note that this macro assumes that the given virtual address space size is
+ * valid. Therefore, the caller is expected to check it is the case using the
+ * CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
+ */
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz)		\
+	(((_virt_addr_space_sz) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT))	\
+	? 0U								\
+	 : (((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT))	\
+	 ? 1U : 2U))
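+
+/*
+ * Illustrative expansion (complementing the example above): for a 40-bit
+ * address space (i.e. virt_addr_space_size == 1 << 40), the size is greater
+ * than (1 << L0_XLAT_ADDRESS_SHIFT) = (1 << 39), so the macro evaluates to 0
+ * and lookups start at level 0.
+ */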
+
+#endif /* XLAT_TABLES_AARCH64_H */
diff --git a/include/lib/xlat_tables/xlat_mmu_helpers.h b/include/lib/xlat_tables/xlat_mmu_helpers.h
new file mode 100644
index 0000000..ffb5abe
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_mmu_helpers.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_MMU_HELPERS_H
+#define XLAT_MMU_HELPERS_H
+
+/*
+ * The following flags are passed to enable_mmu_xxx() to override the default
+ * values used to program system registers while enabling the MMU.
+ */
+
+/*
+ * When this flag is used, all data accesses to Normal memory from this EL and
+ * all Normal memory accesses to the translation tables of this EL are
+ * non-cacheable for all levels of data and unified cache until the caches are
+ * enabled by setting the SCTLR_ELx.C bit.
+ */
+#define DISABLE_DCACHE			(U(1) << 0)
+
+/*
+ * Mark the translation tables as non-cacheable for the MMU table walker, which
+ * is a different observer from the PE/CPU. If the flag is not specified, the
+ * tables are cacheable for the MMU table walker.
+ *
+ * Note that, as far as the PE/CPU observer is concerned, the attributes used
+ * are the ones specified in the translation tables themselves. The MAIR
+ * register specifies the cacheability through the field AttrIndx of the lower
+ * attributes of the translation tables. The shareability is specified in the SH
+ * field of the lower attributes.
+ *
+ * The MMU table walker uses the attributes specified in the fields ORGNn, IRGNn
+ * and SHn of the TCR register to access the translation tables.
+ *
+ * The attributes specified in the TCR register and the tables can be different
+ * as there are no checks to prevent that. Special care must be taken to ensure
+ * that there aren't mismatches. The behaviour in that case is described in the
+ * sections 'Mismatched memory attributes' in the ARMv8 ARM.
+ */
+#define XLAT_TABLE_NC			(U(1) << 1)
+
+/*
+ * Offsets into a mmu_cfg_params array generated by setup_mmu_cfg(). All
+ * parameters are 64 bits wide.
+ */
+#define MMU_CFG_MAIR		0
+#define MMU_CFG_TCR		1
+#define MMU_CFG_TTBR0		2
+#define MMU_CFG_PARAM_MAX	3
+
+#ifndef __ASSEMBLY__
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+/*
+ * Return the values that the MMU configuration registers must contain for the
+ * specified translation context. `params` must be a pointer to an array of
+ * size MMU_CFG_PARAM_MAX.
+ */
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+		   const uint64_t *base_table, unsigned long long max_pa,
+		   uintptr_t max_va, int xlat_regime);
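+
+/*
+ * Illustrative usage sketch (not part of the API). EL3_REGIME comes from
+ * xlat_tables_v2.h; 'base_table', 'max_pa' and 'max_va' are placeholders for
+ * values owned by the caller:
+ *
+ *   uint64_t params[MMU_CFG_PARAM_MAX];
+ *
+ *   setup_mmu_cfg(params, 0U, base_table, max_pa, max_va, EL3_REGIME);
+ *
+ *   // params[MMU_CFG_MAIR], params[MMU_CFG_TCR] and params[MMU_CFG_TTBR0]
+ *   // now hold the values to program into MAIR_ELx, TCR_ELx and TTBR0_ELx.
+ */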
+
+#ifdef AARCH32
+/* AArch32 specific translation table APIs */
+void enable_mmu_svc_mon(unsigned int flags);
+void enable_mmu_hyp(unsigned int flags);
+
+void enable_mmu_direct_svc_mon(unsigned int flags);
+void enable_mmu_direct_hyp(unsigned int flags);
+#else
+/* AArch64 specific translation table APIs */
+void enable_mmu_el1(unsigned int flags);
+void enable_mmu_el2(unsigned int flags);
+void enable_mmu_el3(unsigned int flags);
+
+void enable_mmu_direct_el1(unsigned int flags);
+void enable_mmu_direct_el2(unsigned int flags);
+void enable_mmu_direct_el3(unsigned int flags);
+#endif /* AARCH32 */
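+
+/*
+ * Typical usage sketch (AArch64 shown, illustrative only): once the
+ * translation tables have been initialised (see init_xlat_tables() in
+ * xlat_tables_v2.h), the MMU can be enabled at the current EL, for example:
+ *
+ *   enable_mmu_el1(0U);             // default values, caches enabled
+ *   enable_mmu_el1(DISABLE_DCACHE); // keep data accesses non-cacheable
+ */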
+
+bool xlat_arch_is_granule_size_supported(size_t size);
+size_t xlat_arch_get_max_supported_granule_size(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* XLAT_MMU_HELPERS_H */
diff --git a/include/lib/xlat_tables/xlat_tables_arch.h b/include/lib/xlat_tables/xlat_tables_arch.h
new file mode 100644
index 0000000..251b020
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_arch.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_ARCH_H
+#define XLAT_TABLES_ARCH_H
+
+#ifdef AARCH32
+#include "aarch32/xlat_tables_aarch32.h"
+#else
+#include "aarch64/xlat_tables_aarch64.h"
+#endif
+
+/*
+ * Evaluates to 1 if the given virtual address space size is valid, or 0 if it's
+ * not.
+ *
+ * A valid size is one that is a power of 2 and is within the architectural
+ * limits. Note that these limits are different for AArch32 and AArch64.
+ */
+#define CHECK_VIRT_ADDR_SPACE_SIZE(size)			\
+	(((unsigned long long)(size) >= MIN_VIRT_ADDR_SPACE_SIZE) &&	\
+	((unsigned long long)(size) <= MAX_VIRT_ADDR_SPACE_SIZE) &&	\
+	IS_POWER_OF_TWO(size))
+
+/*
+ * Evaluates to 1 if the given physical address space size is a power of 2,
+ * or 0 if it's not.
+ */
+#define CHECK_PHY_ADDR_SPACE_SIZE(size)				\
+	(IS_POWER_OF_TWO(size))
+
+/*
+ * Compute the number of entries required at the initial lookup level to address
+ * the whole virtual address space.
+ */
+#define GET_NUM_BASE_LEVEL_ENTRIES(addr_space_size)			\
+	((addr_space_size) >>						\
+		XLAT_ADDR_SHIFT(GET_XLAT_TABLE_LEVEL_BASE(addr_space_size)))
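+
+/*
+ * Worked example (AArch64, 4KB granule, illustrative only): for a 4 GB virtual
+ * address space (1 << 32), the initial lookup level is 1 and XLAT_ADDR_SHIFT(1)
+ * is 30, so GET_NUM_BASE_LEVEL_ENTRIES(1 << 32) evaluates to
+ * (1 << 32) >> 30 = 4 base-level entries, each covering 1 GB.
+ */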
+
+#endif /* XLAT_TABLES_ARCH_H */
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
new file mode 100644
index 0000000..d260c3e
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_DEFS_H
+#define XLAT_TABLES_DEFS_H
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_mmu_helpers.h>
+
+/* Miscellaneous MMU related constants */
+#define NUM_2MB_IN_GB		(U(1) << 9)
+#define NUM_4K_IN_2MB		(U(1) << 9)
+#define NUM_GB_IN_4GB		(U(1) << 2)
+
+#define TWO_MB_SHIFT		U(21)
+#define ONE_GB_SHIFT		U(30)
+#define FOUR_KB_SHIFT		U(12)
+
+#define ONE_GB_INDEX(x)		((x) >> ONE_GB_SHIFT)
+#define TWO_MB_INDEX(x)		((x) >> TWO_MB_SHIFT)
+#define FOUR_KB_INDEX(x)	((x) >> FOUR_KB_SHIFT)
+
+#define PAGE_SIZE_4KB		U(4096)
+#define PAGE_SIZE_16KB		U(16384)
+#define PAGE_SIZE_64KB		U(65536)
+
+#define INVALID_DESC		U(0x0)
+/*
+ * A block descriptor points to a region of memory bigger than the granule size
+ * (e.g. a 2MB region when the granule size is 4KB).
+ */
+#define BLOCK_DESC		U(0x1) /* Table levels 0-2 */
+/* A table descriptor points to the next level of translation table. */
+#define TABLE_DESC		U(0x3) /* Table levels 0-2 */
+/*
+ * A page descriptor points to a page, i.e. a memory region whose size is the
+ * translation granule size (e.g. 4KB).
+ */
+#define PAGE_DESC		U(0x3) /* Table level 3 */
+
+#define DESC_MASK		U(0x3)
+
+#define FIRST_LEVEL_DESC_N	ONE_GB_SHIFT
+#define SECOND_LEVEL_DESC_N	TWO_MB_SHIFT
+#define THIRD_LEVEL_DESC_N	FOUR_KB_SHIFT
+
+/* XN: Translation regimes that support one VA range (EL2 and EL3). */
+#define XN			(ULL(1) << 2)
+/* UXN, PXN: Translation regimes that support two VA ranges (EL1&0). */
+#define UXN			(ULL(1) << 2)
+#define PXN			(ULL(1) << 1)
+#define CONT_HINT		(ULL(1) << 0)
+#define UPPER_ATTRS(x)		(((x) & ULL(0x7)) << 52)
+
+#define NON_GLOBAL		(U(1) << 9)
+#define ACCESS_FLAG		(U(1) << 8)
+#define NSH			(U(0x0) << 6)
+#define OSH			(U(0x2) << 6)
+#define ISH			(U(0x3) << 6)
+
+#define TABLE_ADDR_MASK		ULL(0x0000FFFFFFFFF000)
+
+/*
+ * The ARMv8-A architecture allows translation granule sizes of 4KB, 16KB or
+ * 64KB. However, only the 4KB granule is supported at the moment.
+ */
+#define PAGE_SIZE_SHIFT		FOUR_KB_SHIFT
+#define PAGE_SIZE		(U(1) << PAGE_SIZE_SHIFT)
+#define PAGE_SIZE_MASK		(PAGE_SIZE - U(1))
+#define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == U(0))
+
+#define XLAT_ENTRY_SIZE_SHIFT	U(3) /* Each MMU table entry is 8 bytes (1 << 3) */
+#define XLAT_ENTRY_SIZE		(U(1) << XLAT_ENTRY_SIZE_SHIFT)
+
+#define XLAT_TABLE_SIZE_SHIFT	PAGE_SIZE_SHIFT /* Size of one complete table */
+#define XLAT_TABLE_SIZE		(U(1) << XLAT_TABLE_SIZE_SHIFT)
+
+#define XLAT_TABLE_LEVEL_MAX	U(3)
+
+/* Values for number of entries in each MMU translation table */
+#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
+#define XLAT_TABLE_ENTRIES	(U(1) << XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - U(1))
+
+/* Values to convert a memory address to an index into a translation table */
+#define L3_XLAT_ADDRESS_SHIFT	PAGE_SIZE_SHIFT
+#define L2_XLAT_ADDRESS_SHIFT	(L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L1_XLAT_ADDRESS_SHIFT	(L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L0_XLAT_ADDRESS_SHIFT	(L1_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_ADDR_SHIFT(level)	(PAGE_SIZE_SHIFT + \
+		  ((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
+
+#define XLAT_BLOCK_SIZE(level)	(UL(1) << XLAT_ADDR_SHIFT(level))
+/* Mask to get the bits used to index inside a block of a certain level */
+#define XLAT_BLOCK_MASK(level)	(XLAT_BLOCK_SIZE(level) - UL(1))
+/* Mask to get the address bits common to a block of a certain table level*/
+#define XLAT_ADDR_MASK(level)	(~XLAT_BLOCK_MASK(level))
+/*
+ * Extract from the given virtual address the index into the given lookup level.
+ * This macro assumes the system is using the 4KB translation granule.
+ */
+#define XLAT_TABLE_IDX(virtual_addr, level)	\
+	(((virtual_addr) >> XLAT_ADDR_SHIFT(level)) & ULL(0x1FF))
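+
+/*
+ * Worked example (4KB granule, illustrative only): for the virtual address
+ * 0x40100000,
+ *   XLAT_TABLE_IDX(0x40100000, 1) == 0x1   (bits [38:30])
+ *   XLAT_TABLE_IDX(0x40100000, 2) == 0x0   (bits [29:21])
+ *   XLAT_TABLE_IDX(0x40100000, 3) == 0x100 (bits [20:12])
+ */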
+
+/*
+ * The ARMv8 translation table descriptor format defines AP[2:1] as the Access
+ * Permissions bits, and does not define an AP[0] bit.
+ *
+ * AP[1] is valid only for a stage 1 translation that supports two VA ranges
+ * (i.e. in the ARMv8.0 architecture, that is the S-EL1&0 regime). It is RES1
+ * when stage 1 translations can only support one VA range.
+ */
+#define AP2_SHIFT			U(0x7)
+#define AP2_RO				ULL(0x1)
+#define AP2_RW				ULL(0x0)
+
+#define AP1_SHIFT			U(0x6)
+#define AP1_ACCESS_UNPRIVILEGED		ULL(0x1)
+#define AP1_NO_ACCESS_UNPRIVILEGED	ULL(0x0)
+#define AP1_RES1			ULL(0x1)
+
+/*
+ * The following definitions must all be passed to the LOWER_ATTRS() macro to
+ * get the right bitmask.
+ */
+#define AP_RO				(AP2_RO << 5)
+#define AP_RW				(AP2_RW << 5)
+#define AP_ACCESS_UNPRIVILEGED		(AP1_ACCESS_UNPRIVILEGED    << 4)
+#define AP_NO_ACCESS_UNPRIVILEGED	(AP1_NO_ACCESS_UNPRIVILEGED << 4)
+#define AP_ONE_VA_RANGE_RES1		(AP1_RES1 << 4)
+#define NS				(U(0x1) << 3)
+#define ATTR_NON_CACHEABLE_INDEX	ULL(0x2)
+#define ATTR_DEVICE_INDEX		ULL(0x1)
+#define ATTR_IWBWA_OWBWA_NTR_INDEX	ULL(0x0)
+#define LOWER_ATTRS(x)			(((x) & U(0xfff)) << 2)
+
+/* Normal Memory, Outer Write-Through non-transient, Inner Non-cacheable */
+#define ATTR_NON_CACHEABLE		MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_NC, MAIR_NORM_NC)
+/* Device-nGnRE */
+#define ATTR_DEVICE			MAIR_DEV_nGnRE
+/* Normal Memory, Outer Write-Back non-transient, Inner Write-Back non-transient */
+#define ATTR_IWBWA_OWBWA_NTR		MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)
+#define MAIR_ATTR_SET(attr, index)	((attr) << ((index) << 3))
+#define ATTR_INDEX_MASK			U(0x3)
+#define ATTR_INDEX_GET(attr)		(((attr) >> 2) & ATTR_INDEX_MASK)
+
+/*
+ * Shift values for the attributes fields in a block or page descriptor.
+ * See section D4.3.3 in the ARMv8-A ARM (issue B.a).
+ */
+
+/* Memory attributes index field, AttrIndx[2:0]. */
+#define ATTR_INDEX_SHIFT		2
+/* Non-secure bit, NS. */
+#define NS_SHIFT			5
+/* Shareability field, SH[1:0] */
+#define SHAREABILITY_SHIFT		8
+/* The Access Flag, AF. */
+#define ACCESS_FLAG_SHIFT		10
+/* The not global bit, nG. */
+#define NOT_GLOBAL_SHIFT		11
+/* Contiguous hint bit. */
+#define CONT_HINT_SHIFT			52
+/* Execute-never bits, XN. */
+#define PXN_SHIFT			53
+#define XN_SHIFT			54
+#define UXN_SHIFT			XN_SHIFT
+
+#endif /* XLAT_TABLES_DEFS_H */
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
new file mode 100644
index 0000000..4299c31
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_V2_H
+#define XLAT_TABLES_V2_H
+
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2_helpers.h>
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <stdint.h>
+#include <xlat_mmu_helpers.h>
+
+/*
+ * Default granularity size for an mmap_region_t.
+ * Useful when no specific granularity is required.
+ *
+ * By default, choose the biggest possible block size allowed by the
+ * architectural state and granule size in order to minimize the number of page
+ * tables required for the mapping.
+ */
+#define REGION_DEFAULT_GRANULARITY	XLAT_BLOCK_SIZE(MIN_LVL_BLOCK_DESC)
+
+/* Helper macro to define an mmap_region_t. */
+#define MAP_REGION(_pa, _va, _sz, _attr)	\
+	MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)
+
+/* Helper macro to define an mmap_region_t with an identity mapping. */
+#define MAP_REGION_FLAT(_adr, _sz, _attr)			\
+	MAP_REGION(_adr, _adr, _sz, _attr)
+
+/*
+ * Helper macro to define entries for mmap_region_t. It allows 'pa' to be
+ * defined and sets 'va' to 0 for each region. To be used with
+ * mmap_add_alloc_va().
+ */
+#define MAP_REGION_ALLOC_VA(pa, sz, attr)	MAP_REGION(pa, 0, sz, attr)
+
+/*
+ * Helper macro to define an mmap_region_t to map with the desired granularity
+ * of translation tables.
+ *
+ * The granularity value passed to this macro must be a valid block or page
+ * size. When using a 4KB translation granule, this might be 4KB, 2MB or 1GB.
+ * Passing REGION_DEFAULT_GRANULARITY is also allowed and means that the library
+ * is free to choose the granularity for this region. In this case, it is
+ * equivalent to the MAP_REGION() macro.
+ */
+#define MAP_REGION2(_pa, _va, _sz, _attr, _gr)			\
+	MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
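+
+/*
+ * Illustrative usage (addresses, sizes and attributes are hypothetical;
+ * mmap_region_t and the MT_* attributes are defined further below):
+ *
+ *   static const mmap_region_t plat_mmap[] = {
+ *       // 2MB of secure read-write data, identity-mapped
+ *       MAP_REGION_FLAT(0x80000000, 0x00200000, MT_MEMORY | MT_RW | MT_SECURE),
+ *       // 4KB device region mapped at a different VA, with page granularity
+ *       MAP_REGION2(0x1C090000, 0xF0000000, 0x1000,
+ *                   MT_DEVICE | MT_RW | MT_SECURE, PAGE_SIZE),
+ *       {0} // list terminator
+ *   };
+ */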
+
+/*
+ * Shifts and masks to access fields of an mmap attribute
+ */
+#define MT_TYPE_MASK		U(0x7)
+#define MT_TYPE(_attr)		((_attr) & MT_TYPE_MASK)
+/* Access permissions (RO/RW) */
+#define MT_PERM_SHIFT		U(3)
+/* Security state (SECURE/NS) */
+#define MT_SEC_SHIFT		U(4)
+/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
+#define MT_EXECUTE_SHIFT	U(5)
+/* In the EL1&0 translation regime, User (EL0) or Privileged (EL1). */
+#define MT_USER_SHIFT		U(6)
+/* All other bits are reserved */
+
+/*
+ * Memory mapping attributes
+ */
+
+/*
+ * Memory types supported.
+ * These are organised so that, going down the list, the memory types are
+ * getting weaker; conversely going up the list the memory types are getting
+ * stronger.
+ */
+#define MT_DEVICE		U(0)
+#define MT_NON_CACHEABLE	U(1)
+#define MT_MEMORY		U(2)
+/* Values up to 7 are reserved to add new memory types in the future */
+
+#define MT_RO			(U(0) << MT_PERM_SHIFT)
+#define MT_RW			(U(1) << MT_PERM_SHIFT)
+
+#define MT_SECURE		(U(0) << MT_SEC_SHIFT)
+#define MT_NS			(U(1) << MT_SEC_SHIFT)
+
+/*
+ * Access permissions for instruction execution are only relevant for normal
+ * read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored (and potentially
+ * overridden) otherwise:
+ *  - Device memory is always marked as execute-never.
+ *  - Read-write normal memory is always marked as execute-never.
+ */
+#define MT_EXECUTE		(U(0) << MT_EXECUTE_SHIFT)
+#define MT_EXECUTE_NEVER	(U(1) << MT_EXECUTE_SHIFT)
+
+/*
+ * When mapping a region at EL0 or EL1, this attribute determines whether a
+ * User (EL0) or a Privileged (EL1) mapping is created.
+ */
+#define MT_USER			(U(1) << MT_USER_SHIFT)
+#define MT_PRIVILEGED		(U(0) << MT_USER_SHIFT)
+
+/* Compound attributes for most common usages */
+#define MT_CODE			(MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA		(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+#define MT_RW_DATA		(MT_MEMORY | MT_RW | MT_EXECUTE_NEVER)
+
+#if !ERROR_DEPRECATED
+typedef unsigned int mmap_attr_t;
+#endif
+
+/*
+ * Structure for specifying a single region of memory.
+ */
+typedef struct mmap_region {
+	unsigned long long	base_pa;
+	uintptr_t		base_va;
+	size_t			size;
+	unsigned int		attr;
+	/* Desired granularity. See the MAP_REGION2() macro for more details. */
+	size_t			granularity;
+} mmap_region_t;
+
+/*
+ * Translation regimes supported by this library. EL_REGIME_INVALID tells the
+ * library to detect the regime at runtime.
+ */
+#define EL1_EL0_REGIME		1
+#define EL2_REGIME		2
+#define EL3_REGIME		3
+#define EL_REGIME_INVALID	-1
+
+/*
+ * Declare the translation context type.
+ * Its definition is private.
+ */
+typedef struct xlat_ctx xlat_ctx_t;
+
+/*
+ * Statically allocate a translation context and associated structures. Also
+ * initialize them.
+ *
+ * _ctx_name:
+ *   Prefix for the translation context variable.
+ *   E.g. If _ctx_name is 'foo', the variable will be called 'foo_xlat_ctx'.
+ *   Useful to distinguish multiple contexts from one another.
+ *
+ * _mmap_count:
+ *   Number of mmap_region_t to allocate.
+ *   Would typically be MAX_MMAP_REGIONS for the translation context describing
+ *   the software image currently executing.
+ *
+ * _xlat_tables_count:
+ *   Number of sub-translation tables to allocate.
+ *   Would typically be MAX_XLAT_TABLES for the translation context describing
+ *   the software image currently executing.
+ *   Note that this is only for sub-tables; at the initial lookup level, there
+ *   is always a single table.
+ *
+ * _virt_addr_space_size, _phy_addr_space_size:
+ *   Size (in bytes) of the virtual (resp. physical) address space.
+ *   Would typically be PLAT_VIRT_ADDR_SPACE_SIZE
+ *   (resp. PLAT_PHY_ADDR_SPACE_SIZE) for the translation context describing the
+ *   software image currently executing.
+ */
+#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
+			_virt_addr_space_size, _phy_addr_space_size)	\
+	REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count),	\
+					 (_xlat_tables_count),		\
+					 (_virt_addr_space_size),	\
+					 (_phy_addr_space_size),	\
+					 EL_REGIME_INVALID, "xlat_table")
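+
+/*
+ * Illustrative usage (the context name is hypothetical; the MAX_* and PLAT_*
+ * values would normally come from platform_def.h):
+ *
+ *   REGISTER_XLAT_CONTEXT(el1, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+ *                         PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+ *
+ *   // This declares and initialises a context named 'el1_xlat_ctx' that can
+ *   // then be passed to the '_ctx' variants of the APIs below.
+ */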
+
+/*
+ * Same as REGISTER_XLAT_CONTEXT plus the additional parameters:
+ *
+ * _xlat_regime:
+ *   Specify the translation regime managed by this xlat_ctx_t instance. The
+ *   values are the ones from the EL*_REGIME definitions.
+ *
+ * _section_name:
+ *   Specify the name of the section where the translation tables have to be
+ *   placed by the linker.
+ */
+#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
+			_virt_addr_space_size, _phy_addr_space_size,	\
+			_xlat_regime, _section_name)			\
+	REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count),	\
+					 (_xlat_tables_count),		\
+					 (_virt_addr_space_size),	\
+					 (_phy_addr_space_size),	\
+					 (_xlat_regime), (_section_name))
+
+/******************************************************************************
+ * Generic translation table APIs.
+ * Each API comes in two variants:
+ * - one that acts on the current translation context for this software image
+ * - another that acts on the given translation context instead. This variant
+ *   is named after the first one, with an additional '_ctx' suffix.
+ *****************************************************************************/
+
+/*
+ * Initialize translation tables from the current list of mmap regions. Calling
+ * this function marks the transition point after which static regions can no
+ * longer be added.
+ */
+void init_xlat_tables(void);
+void init_xlat_tables_ctx(xlat_ctx_t *ctx);
+
+/*
+ * Add a static region with defined base PA and base VA. This function can only
+ * be used before initializing the translation tables. The region cannot be
+ * removed afterwards.
+ */
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+		     size_t size, unsigned int attr);
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
+
+/*
+ * Add an array of static regions with defined base PA and base VA. This
+ * function can only be used before initializing the translation tables. The
+ * regions cannot be removed afterwards.
+ */
+void mmap_add(const mmap_region_t *mm);
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
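+
+/*
+ * Illustrative usage sketch ('plat_mmap' is a hypothetical, zero-terminated
+ * array of mmap_region_t entries built with the MAP_REGION*() macros):
+ *
+ *   mmap_add(plat_mmap);
+ *   init_xlat_tables();
+ */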
+
+/*
+ * Add a region with defined base PA. The base VA is calculated using the
+ * highest existing region in the mmap array and is returned through 'base_va'
+ * (or 'mm->base_va' for the _ctx variant) even if the region cannot be added.
+ */
+void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
+			      size_t size, unsigned int attr);
+void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Add an array of static regions with defined base PA, and fill the base VA
+ * field in the array of structs. This function can only be used before
+ * initializing the translation tables. The regions cannot be removed afterwards.
+ */
+void mmap_add_alloc_va(mmap_region_t *mm);
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Add a dynamic region with defined base PA and base VA. This type of region
+ * can be added and removed even after the translation tables are initialized.
+ *
+ * Returns:
+ *        0: Success.
+ *   EINVAL: Invalid values were used as arguments.
+ *   ERANGE: Memory limits were surpassed.
+ *   ENOMEM: Not enough space in the mmap array or not enough free xlat tables.
+ *    EPERM: It overlaps another region in an invalid way.
+ */
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+			    size_t size, unsigned int attr);
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Add a dynamic region with defined base PA. Returns base VA calculated using
+ * the highest existing region in the mmap array even if it fails to allocate
+ * the region.
+ *
+ * mmap_add_dynamic_region_alloc_va() returns the allocated VA in 'base_va'.
+ * mmap_add_dynamic_region_alloc_va_ctx() returns it in 'mm->base_va'.
+ *
+ * It returns the same error values as mmap_add_dynamic_region().
+ */
+int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
+				     uintptr_t *base_va,
+				     size_t size, unsigned int attr);
+int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Remove a region with the specified base VA and size. Only dynamic regions can
+ * be removed, and they can be removed even if the translation tables are
+ * initialized.
+ *
+ * Returns:
+ *        0: Success.
+ *   EINVAL: The specified region wasn't found.
+ *    EPERM: Trying to remove a static region.
+ */
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size);
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
+				uintptr_t base_va,
+				size_t size);
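+
+/*
+ * Illustrative usage sketch (address and attributes are hypothetical):
+ *
+ *   int ret;
+ *
+ *   ret = mmap_add_dynamic_region(0x88000000, 0x88000000, PAGE_SIZE,
+ *                                 MT_MEMORY | MT_RW | MT_NS);
+ *   if (ret == 0) {
+ *       // ... use the mapping ...
+ *       ret = mmap_remove_dynamic_region(0x88000000, PAGE_SIZE);
+ *   }
+ */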
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Change the memory attributes of the memory region starting from a given
+ * virtual address in a set of translation tables.
+ *
+ * This function can only be used after the translation tables have been
+ * initialized.
+ *
+ * The base address of the memory region must be aligned on a page boundary.
+ * The size of this memory region must be a multiple of a page size.
+ * The memory region must be already mapped by the given translation tables
+ * and it must be mapped at the granularity of a page.
+ *
+ * Return 0 on success, a negative value on error.
+ *
+ * In case of error, the memory attributes remain unchanged and this function
+ * has no effect.
+ *
+ * ctx:
+ *   Translation context to work on.
+ * base_va:
+ *   Virtual address of the 1st page to change the attributes of.
+ * size:
+ *   Size in bytes of the memory region.
+ * attr:
+ *   New attributes of the page tables. The attributes that can be changed are
+ *   data access (MT_RO/MT_RW), instruction access (MT_EXECUTE_NEVER/MT_EXECUTE)
+ *   and user/privileged access (MT_USER/MT_PRIVILEGED) in the case of contexts
+ *   that are used in the EL1&0 translation regime. Also, note that this
+ *   function doesn't allow a region to be remapped as both RW and executable,
+ *   nor device memory to be remapped as executable.
+ *
+ * NOTE: The caller of this function must be able to write to the translation
+ * tables, i.e. the memory where they are stored must be mapped with read-write
+ * access permissions. This function assumes that this is the case; if it is
+ * not, it might trigger a data abort exception.
+ *
+ * NOTE2: The caller is responsible for making sure that the targeted
+ * translation tables are not modified by any other code while this function is
+ * executing.
+ */
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+				   size_t size, uint32_t attr);
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr);
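+
+/*
+ * Illustrative usage sketch (the address is hypothetical): make a previously
+ * mapped, page-aligned data page read-only and non-executable:
+ *
+ *   int ret = xlat_change_mem_attributes(0x88001000, PAGE_SIZE, MT_RO_DATA);
+ */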
+
+/*
+ * Query the memory attributes of a memory page in a set of translation tables.
+ *
+ * Return 0 on success, a negative error code on error.
+ * On success, the attributes are stored into *attr.
+ *
+ * ctx
+ *   Translation context to work on.
+ * base_va
+ *   Virtual address of the page to get the attributes of.
+ *   There are no alignment restrictions on this address. The attributes of the
+ *   memory page it lies within are returned.
+ * attr
+ *   Output parameter in which the attributes of the targeted memory page are
+ *   stored.
+ */
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+				uint32_t *attr);
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr);
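+
+/*
+ * Illustrative usage sketch (the address is hypothetical):
+ *
+ *   uint32_t attr;
+ *
+ *   if (xlat_get_mem_attributes(0x88001000, &attr) == 0) {
+ *       // 'attr' now holds the MT_* attributes of the page containing
+ *       // the given address.
+ *   }
+ */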
+
+#endif /*__ASSEMBLY__*/
+#endif /* XLAT_TABLES_V2_H */
diff --git a/include/lib/xlat_tables/xlat_tables_v2_helpers.h b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
new file mode 100644
index 0000000..fa89958
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This header file contains internal definitions that are not supposed to be
+ * used outside of this library code.
+ */
+
+#ifndef XLAT_TABLES_V2_HELPERS_H
+#define XLAT_TABLES_V2_HELPERS_H
+
+#ifndef XLAT_TABLES_V2_H
+#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <xlat_tables_arch.h>
+#include <xlat_tables_defs.h>
+
+/* Forward declaration */
+struct mmap_region;
+
+/*
+ * Helper macro to define an mmap_region_t. This macro allows all the fields of
+ * the structure to be specified, but its parameter list is not guaranteed to
+ * remain stable as members are added to mmap_region_t.
+ */
+#define MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)		\
+	{							\
+		.base_pa = (_pa),				\
+		.base_va = (_va),				\
+		.size = (_sz),					\
+		.attr = (_attr),				\
+		.granularity = (_gr),				\
+	}
+
+/* Struct that holds all information about the translation tables. */
+struct xlat_ctx {
+	/*
+	 * Max allowed Virtual and Physical Addresses.
+	 */
+	unsigned long long pa_max_address;
+	uintptr_t va_max_address;
+
+	/*
+	 * Array of all memory regions stored in order of ascending end address
+	 * and ascending size to simplify the code that allows overlapping
+	 * regions. The list is terminated by the first entry with size == 0.
+	 * The max size of the list is stored in `mmap_num`. `mmap` points to an
+	 * array of mmap_num + 1 elements, so that there is space for the final
+	 * null entry.
+	 */
+	struct mmap_region *mmap;
+	int mmap_num;
+
+	/*
+	 * Array of finer-grain translation tables.
+	 * For example, if the initial lookup level is 1 then this array would
+	 * contain both level-2 and level-3 entries.
+	 */
+	uint64_t (*tables)[XLAT_TABLE_ENTRIES];
+	int tables_num;
+	/*
+	 * Keep track of how many regions are mapped in each table. The base
+	 * table can't be unmapped, so there is no need to keep track of it.
+	 */
+#if PLAT_XLAT_TABLES_DYNAMIC
+	int *tables_mapped_regions;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+	int next_table;
+
+	/*
+	 * Base translation table. It doesn't need to have the same amount of
+	 * entries as the ones used for other levels.
+	 */
+	uint64_t *base_table;
+	unsigned int base_table_entries;
+
+	/*
+	 * Max Physical and Virtual addresses currently in use by the
+	 * translation tables. These might get updated as we map/unmap memory
+	 * regions but they will never go beyond pa/va_max_address.
+	 */
+	unsigned long long max_pa;
+	uintptr_t max_va;
+
+	/* Level of the base translation table. */
+	unsigned int base_level;
+
+	/* Set to true when the translation tables are initialized. */
+	bool initialized;
+
+	/*
+	 * Translation regime managed by this xlat_ctx_t. It should be one of
+	 * the EL*_REGIME defines.
+	 */
+	int xlat_regime;
+};
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)		\
+	static int _ctx_name##_mapped_regions[_xlat_tables_count];
+
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name)				\
+	.tables_mapped_regions = _ctx_name##_mapped_regions,
+#else
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)		\
+	/* do nothing */
+
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name)				\
+	/* do nothing */
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+#define REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count,		\
+			_xlat_tables_count, _virt_addr_space_size,	\
+			_phy_addr_space_size, _xlat_regime, _section_name)\
+	CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(_virt_addr_space_size),	\
+		assert_invalid_virtual_addr_space_size_for_##_ctx_name);\
+									\
+	CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(_phy_addr_space_size),	\
+		assert_invalid_physical_addr_space_size_for_##_ctx_name);\
+									\
+	static mmap_region_t _ctx_name##_mmap[_mmap_count + 1];		\
+									\
+	static uint64_t _ctx_name##_xlat_tables[_xlat_tables_count]	\
+		[XLAT_TABLE_ENTRIES]					\
+		__aligned(XLAT_TABLE_SIZE) __section(_section_name);	\
+									\
+	static uint64_t _ctx_name##_base_xlat_table			\
+		[GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)]	\
+		__aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)\
+			* sizeof(uint64_t));				\
+									\
+	XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)		\
+									\
+	static xlat_ctx_t _ctx_name##_xlat_ctx = {			\
+		.va_max_address = (_virt_addr_space_size) - 1UL,	\
+		.pa_max_address = (_phy_addr_space_size) - 1ULL,	\
+		.mmap = _ctx_name##_mmap,				\
+		.mmap_num = (_mmap_count),				\
+		.base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size),\
+		.base_table = _ctx_name##_base_xlat_table,		\
+		.base_table_entries =					\
+			GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size),\
+		.tables = _ctx_name##_xlat_tables,			\
+		.tables_num = _xlat_tables_count,			\
+		 XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name)			\
+		.xlat_regime = (_xlat_regime),				\
+		.max_pa = 0U,						\
+		.max_va = 0U,						\
+		.next_table = 0,					\
+		.initialized = false,					\
+	}
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* XLAT_TABLES_V2_HELPERS_H */