refactor(xlat): apply several refactorings to the xlat library
This patch includes a number of refactorings of the xlat library in
order to facilitate the implementation of unit tests for the library.
Among the improvements:
* Macros to allocate and initialize the translation tables as well
as the translation context structures have been removed and
replaced with library APIs.
* Some of the library APIs have been collapsed, reducing the size
of the overall interface and presenting a more compact API to
the consumer.
* The distinction between the translation base tables and the
  intermediate tables has been removed; a single array of tables
  is now used for the whole translation context.
The patch also adapts the rest of RMM to use the new library API.
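
As an illustration of the new interface, the sketch below mirrors the
slot buffer code in lib/realm/src/buffer.c from this patch (the
setup_ctx() wrapper is hypothetical): the shared configuration is
initialized once at cold boot, then each PE creates its own tables.

    static struct xlat_mmap_region regions[] = { RMM_SLOT_BUF_MMAP };
    static struct xlat_ctx_cfg cfg;
    static uint64_t s1tt[XLAT_TABLE_ENTRIES]
            __aligned(XLAT_TABLES_ALIGNMENT);
    static struct xlat_ctx_tbls tbls;
    static struct xlat_ctx ctx;

    int setup_ctx(void)
    {
        /* Cold boot: validate and record the mmap regions */
        int rc = xlat_ctx_cfg_init(&cfg, VA_HIGH_REGION, &regions[0],
                                   1U, RMM_SLOT_BUF_VA_SIZE);

        if ((rc != 0) && (rc != -EALREADY)) {
            return rc;
        }

        /* First boot of each PE: create its translation tables */
        return xlat_ctx_init(&ctx, &cfg, &tbls, &s1tt[0], 1U);
    }
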
Signed-off-by: Javier Almansa Sobrino <javier.almansasobrino@arm.com>
Change-Id: I2fb486c0c5bc005446b09e3fef5e7de9bf0efda0
diff --git a/configs/fvp_defcfg.cmake b/configs/fvp_defcfg.cmake
index 1451ebc..a269501 100644
--- a/configs/fvp_defcfg.cmake
+++ b/configs/fvp_defcfg.cmake
@@ -26,10 +26,15 @@
arm_config_option_override(NAME RMM_UART_ADDR DEFAULT 0x1c0c0000)
#
+# Extra memory regions needed by this platform
+#
+arm_config_option_override(NAME PLAT_CMN_EXTRA_MMAP_REGIONS DEFAULT 1)
+
+#
# Maximum number of translation tables allocated by the runtime context
# for the translation library.
#
-arm_config_option_override(NAME PLAT_CMN_CTX_MAX_XLAT_TABLES DEFAULT 5)
+arm_config_option_override(NAME PLAT_CMN_CTX_MAX_XLAT_TABLES DEFAULT 6)
#
# Disable FPU/SIMD usage in RMM. Enabling this option turns on
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index 0db4cdc..5e0adfe 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -214,7 +214,7 @@
RMM_STATIC_ANALYSIS_CPPCHECK_CHECKER_THREAD_SAFETY ,ON | OFF ,ON ,"Enable Cppcheck's thread safety checker"
RMM_UART_ADDR , ,0x0 ,"Base addr of UART to be used for RMM logs"
PLAT_CMN_CTX_MAX_XLAT_TABLES , ,0 ,"Maximum number of translation tables used by the runtime context"
- PLAT_CMN_MAX_MMAP_REGIONS , ,5 ,"Maximum number of mmap regions to be allocated for the platform"
+ PLAT_CMN_EXTRA_MMAP_REGIONS , ,0 ,"Extra platform mmap regions that need to be mapped in S1 xlat tables"
RMM_NUM_PAGES_PER_STACK , ,3 ,"Number of pages to use per CPU stack"
MBEDTLS_ECP_MAX_OPS ,248 - ,1000 ,"Number of max operations per ECC signing iteration"
RMM_FPU_USE_AT_REL2 ,ON | OFF ,OFF(fake_host) ON(aarch64),"Enable FPU/SIMD usage in RMM."
diff --git a/lib/arch/include/arch.h b/lib/arch/include/arch.h
index b2c765c..328c264 100644
--- a/lib/arch/include/arch.h
+++ b/lib/arch/include/arch.h
@@ -140,8 +140,7 @@
#define TCR_EL2_E0PD1 (UL(1) << 56) /* TODO: ARMv8.5-E0PD, otherwise RES0 */
#define TCR_TxSZ_MIN UL(16)
-#define TCR_TxSZ_MAX UL(39)
-#define TCR_TxSZ_MAX_TTST UL(48)
+#define TCR_TxSZ_MAX UL(48)
/* HCR definitions */
#define HCR_FWB (UL(1) << 46)
diff --git a/lib/arch/src/arch_features.c b/lib/arch/src/arch_features.c
index ecba2e9..6c12796 100644
--- a/lib/arch/src/arch_features.c
+++ b/lib/arch/src/arch_features.c
@@ -18,7 +18,7 @@
* Physical Address ranges supported in the AArch64 Memory Model.
* Value 0b110 is supported in ARMv8.2 onwards but not used in RMM.
*/
- static const unsigned int pa_range_bits_arr[] = {
+ const unsigned int pa_range_bits_arr[] = {
PARANGE_0000_WIDTH, PARANGE_0001_WIDTH, PARANGE_0010_WIDTH,
PARANGE_0011_WIDTH, PARANGE_0100_WIDTH, PARANGE_0101_WIDTH,
/*
diff --git a/lib/realm/include/buffer.h b/lib/realm/include/buffer.h
index 7fc4187..7cb0a39 100644
--- a/lib/realm/include/buffer.h
+++ b/lib/realm/include/buffer.h
@@ -56,18 +56,27 @@
/*
* Initializes and enables the VMSA for the slot buffer mechanism.
*
- * Create an empty translation context for the current CPU.
- * If the context already exists (e.g. current CPU was previously
+ * Create an empty translation context for the current PE.
+ * If the context already exists (e.g. current PE was previously
* turned on and therefore the context is already in memory),
* nothing happens.
*/
void slot_buf_setup_xlat(void);
/*
- * Finishes initializing the slot buffer mechanism.
- * This function should be called after the MMU is enabled.
+ * Initializes the slot buffer components common to all PEs. This function
+ * must only be called once during cold boot initialization.
+ *
+ * Returns 0 on success or a negative POSIX error code otherwise.
*/
-void slot_buf_init(void);
+int slot_buf_coldboot_init(void);
+
+/*
+ * Finishes initializing the slot buffer mechanism.
+ * This function should be called after the MMU is enabled, during the
+ * warmboot path.
+ */
+void slot_buf_finish_warmboot_init(void);
/******************************************************************************
* Internal APIs not meant to be invoked by generic RMM code.
diff --git a/lib/realm/src/buffer.c b/lib/realm/src/buffer.c
index e7e69b6..ca046f6 100644
--- a/lib/realm/src/buffer.c
+++ b/lib/realm/src/buffer.c
@@ -24,7 +24,7 @@
#include <xlat_tables.h>
/*
- * All the slot buffers for a given CPU must be mapped by a single translation
+ * All the slot buffers for a given PE must be mapped by a single translation
* table, which means the max VA size should be <= 4KB * 512
*/
COMPILER_ASSERT((RMM_SLOT_BUF_VA_SIZE) <= (GRANULE_SIZE * XLAT_TABLE_ENTRIES));
@@ -54,32 +54,26 @@
/*
* The base tables for all the contexts are manually allocated as a continous
- * block of memory.
+ * block of memory (one L3 table per PE).
*/
-static uint64_t transient_base_table[XLAT_TABLE_ENTRIES * MAX_CPUS]
- __aligned(BASE_XLAT_TABLES_ALIGNMENT)
- __section("slot_buffer_xlat_tbls");
+static uint64_t slot_buf_s1tt[XLAT_TABLE_ENTRIES * MAX_CPUS]
+ __aligned(XLAT_TABLES_ALIGNMENT);
/* Allocate per-cpu xlat_ctx_tbls */
static struct xlat_ctx_tbls slot_buf_tbls[MAX_CPUS];
-/*
- * Allocate mmap regions and define common xlat_ctx_cfg shared will
- * all slot_buf_xlat_ctx
- */
-XLAT_REGISTER_VA_SPACE(slot_buf, VA_HIGH_REGION,
- SLOT_BUF_MMAP_REGIONS,
- RMM_SLOT_BUF_VA_SIZE);
+/* Allocate xlat_ctx_cfg for high VA which will be common to all PEs */
+static struct xlat_ctx_cfg slot_buf_xlat_ctx_cfg;
/* context definition */
static struct xlat_ctx slot_buf_xlat_ctx[MAX_CPUS];
/*
- * Allocate a cache to store the last level table entry where the slot buffers
+ * Allocate a cache to store the last level table info where the slot buffers
* are mapped to avoid needing to perform a table walk every time a buffer
- * slot operation is needed.
+ * slot operation has to be done.
*/
-static struct xlat_table_entry te_cache[MAX_CPUS];
+static struct xlat_tbl_info tbl_info_cache[MAX_CPUS];
uintptr_t slot_to_va(enum buffer_slot slot)
{
@@ -93,17 +87,32 @@
return &slot_buf_xlat_ctx[my_cpuid()];
}
-struct xlat_table_entry *get_cache_entry(void)
+struct xlat_tbl_info *get_cached_tbl_info(void)
{
- return &te_cache[my_cpuid()];
+ return &tbl_info_cache[my_cpuid()];
}
__unused static uint64_t slot_to_descriptor(enum buffer_slot slot)
{
- uint64_t *entry = xlat_get_pte_from_table(get_cache_entry(),
- slot_to_va(slot));
+ uint64_t *entry = xlat_get_tte_ptr(get_cached_tbl_info(),
+ slot_to_va(slot));
- return xlat_read_descriptor(entry);
+ return xlat_read_tte(entry);
+}
+
+int slot_buf_coldboot_init(void)
+{
+ static struct xlat_mmap_region slot_buf_regions[] = {
+ RMM_SLOT_BUF_MMAP,
+ };
+
+ /*
+ * Initialize the common configuration used for all
+ * translation contexts
+ */
+ return xlat_ctx_cfg_init(&slot_buf_xlat_ctx_cfg, VA_HIGH_REGION,
+ &slot_buf_regions[0], 1U,
+ RMM_SLOT_BUF_VA_SIZE);
}
/*
@@ -113,62 +122,28 @@
void slot_buf_setup_xlat(void)
{
unsigned int cpuid = my_cpuid();
- int ret = xlat_ctx_create_dynamic(get_slot_buf_xlat_ctx(),
- &slot_buf_xlat_ctx_cfg,
- &slot_buf_tbls[cpuid],
- &transient_base_table[
- XLAT_TABLE_ENTRIES * cpuid],
- GET_NUM_BASE_LEVEL_ENTRIES(
- RMM_SLOT_BUF_VA_SIZE),
- NULL,
- 0U);
+ struct xlat_ctx *slot_buf_ctx = get_slot_buf_xlat_ctx();
- if (ret == -EINVAL) {
+ /*
+ * Initialize the translation tables for the current context.
+ * This is done on the first boot of each PE.
+ */
+ int ret = xlat_ctx_init(slot_buf_ctx,
+ &slot_buf_xlat_ctx_cfg,
+ &slot_buf_tbls[cpuid],
+ &slot_buf_s1tt[XLAT_TABLE_ENTRIES * cpuid], 1U);
+
+ if (!((ret == 0) || (ret == -EALREADY))) {
/*
* If the context was already created, carry on with the
* initialization. If it cannot be created, panic.
*/
- ERROR("%s (%u): Failed to create the empty context for the slot buffers\n",
- __func__, __LINE__);
+ ERROR("%s (%u): Failed to initialize the xlat context for the slot buffers (-%i)\n",
+ __func__, __LINE__, ret);
panic();
}
- if (xlat_ctx_cfg_initialized(get_slot_buf_xlat_ctx()) == false) {
- /* Add necessary mmap regions during cold boot */
- struct xlat_mmap_region slot_buf_regions[] = {
- RMM_SLOT_BUF_MMAP,
- {0}
- };
-
- if (xlat_mmap_add_ctx(get_slot_buf_xlat_ctx(),
- slot_buf_regions, true) != 0) {
- ERROR("%s (%u): Failed to map slot buffer memory on high region\n",
- __func__, __LINE__);
- panic();
- }
-
- }
-
- if (xlat_ctx_tbls_initialized(get_slot_buf_xlat_ctx()) == false) {
- /*
- * Initialize the translation tables for the current context.
- * This is done on the first boot of each CPU.
- */
- int err;
-
- err = xlat_init_tables_ctx(get_slot_buf_xlat_ctx());
- if (err != 0) {
- ERROR("%s (%u): xlat initialization failed with code %i\n",
- __func__, __LINE__, err);
- panic();
- }
- }
-
- /*
- * Confugure MMU registers. This function assumes that all the
- * contexts of a particular VA region (HIGH or LOW VA) use the same
- * limits for VA and PA spaces.
- */
+ /* Configure MMU registers */
if (xlat_arch_setup_mmu_cfg(get_slot_buf_xlat_ctx())) {
ERROR("%s (%u): MMU registers failed to initialize\n",
__func__, __LINE__);
@@ -180,20 +155,17 @@
* Finishes initializing the slot buffer mechanism.
* This function must be called after the MMU is enabled.
*/
-void slot_buf_init(void)
+void slot_buf_finish_warmboot_init(void)
{
- if (is_mmu_enabled() == false) {
- ERROR("%s: MMU must be enabled\n", __func__);
- panic();
- }
+ assert(is_mmu_enabled() == true);
/*
* Initialize (if not done yet) the internal cache with the last level
* translation table that holds the MMU descriptors for the slot
* buffers, so we can access them faster when we need to map/unmap.
*/
- if ((get_cache_entry())->table == NULL) {
- if (xlat_get_table_from_va(get_cache_entry(),
+ if ((get_cached_tbl_info())->table == NULL) {
+ if (xlat_get_table_from_va(get_cached_tbl_info(),
get_slot_buf_xlat_ctx(),
slot_to_va(SLOT_NS)) != 0) {
ERROR("%s (%u): Failed to initialize table entry cache for CPU %u\n",
@@ -351,7 +323,7 @@
{
uint64_t attr = SLOT_DESC_ATTR;
uintptr_t va = slot_to_va(slot);
- struct xlat_table_entry *entry = get_cache_entry();
+ struct xlat_tbl_info *entry = get_cached_tbl_info();
assert(GRANULE_ALIGNED(addr));
@@ -374,5 +346,5 @@
*/
COMPILER_BARRIER();
- xlat_unmap_memory_page(get_cache_entry(), (uintptr_t)buf);
+ xlat_unmap_memory_page(get_cached_tbl_info(), (uintptr_t)buf);
}
diff --git a/lib/realm/src/include/buffer_private.h b/lib/realm/src/include/buffer_private.h
index 51b0984..ccceb41 100644
--- a/lib/realm/src/include/buffer_private.h
+++ b/lib/realm/src/include/buffer_private.h
@@ -19,5 +19,5 @@
#define SLOT_VIRT ((ULL(0xffffffffffffffff) - \
RMM_SLOT_BUF_VA_SIZE + ULL(1)))
-struct xlat_table_entry *get_cache_entry(void);
+struct xlat_tbl_info *get_cached_tbl_info(void);
uintptr_t slot_to_va(enum buffer_slot slot);
diff --git a/lib/realm/tests/buffer.cpp b/lib/realm/tests/buffer.cpp
index 6b04bec..77050df 100644
--- a/lib/realm/tests/buffer.cpp
+++ b/lib/realm/tests/buffer.cpp
@@ -1315,10 +1315,10 @@
*/
}
-TEST(slot_buffer, slot_buf_init_TC1)
+TEST(slot_buffer, slot_buf_finish_warmboot_init_TC1)
{
/*
- * slot_buf_init() has already been used during initialization
- * for all tests, so skip it.
+ * slot_buf_finish_warmboot_init() has already been used during
+ * initialization for all tests, so skip it.
*/
}
diff --git a/lib/realm/tests/realm_test_utils.c b/lib/realm/tests/realm_test_utils.c
index 9f00b97..0af8b47 100644
--- a/lib/realm/tests/realm_test_utils.c
+++ b/lib/realm/tests/realm_test_utils.c
@@ -17,10 +17,10 @@
*/
uintptr_t realm_test_util_slot_to_pa(enum buffer_slot slot)
{
- struct xlat_table_entry *entry = get_cache_entry();
+ struct xlat_tbl_info *entry = get_cached_tbl_info();
uintptr_t va = slot_to_va(slot);
- uint64_t *desc_ptr = xlat_get_pte_from_table(entry, va);
- uint64_t descriptor = xlat_read_descriptor(desc_ptr);
+ uint64_t *desc_ptr = xlat_get_tte_ptr(entry, va);
+ uint64_t descriptor = xlat_read_tte(desc_ptr);
return (uintptr_t)(descriptor & XLAT_TTE_L3_PA_MASK);
}
diff --git a/lib/xlat/CMakeLists.txt b/lib/xlat/CMakeLists.txt
index 86fbce0..ab727b8 100644
--- a/lib/xlat/CMakeLists.txt
+++ b/lib/xlat/CMakeLists.txt
@@ -20,7 +20,8 @@
target_sources(rmm-lib-xlat
PRIVATE "src/xlat_tables_core.c"
"src/xlat_tables_utils.c"
- "src/xlat_tables_arch.c")
+ "src/xlat_tables_arch.c"
+ "src/xlat_contexts.c")
if(NOT RMM_ARCH STREQUAL fake_host)
target_sources(rmm-lib-xlat
diff --git a/lib/xlat/include/xlat_contexts.h b/lib/xlat/include/xlat_contexts.h
index f0f6c09..007057f 100644
--- a/lib/xlat/include/xlat_contexts.h
+++ b/lib/xlat/include/xlat_contexts.h
@@ -11,15 +11,9 @@
#ifndef __ASSEMBLER__
-#include <assert.h>
#include <stdbool.h>
-#include <stddef.h>
-#include <utils_def.h>
#include <xlat_defs.h>
-/* Forward declaration */
-struct xlat_mmap_region;
-
/* Enumerator to identify the right address space within a context */
typedef enum xlat_addr_region_id {
VA_LOW_REGION = 0,
@@ -34,27 +28,11 @@
* private tables for each PE.
*/
struct xlat_ctx_tbls {
- /*
- * Array of finer-grain translation tables.
- * For example, if the initial lookup level is 1 then this array would
- * contain both level-2 and level-3 entries.
- */
- uint64_t (*tables)[XLAT_TABLE_ENTRIES];
+ /* Array of translation tables. */
+ uint64_t *tables;
unsigned int tables_num;
unsigned int next_table;
- /*
- * Base translation table.
- * It has the same number of entries as the ones used for other levels
- * although it is possible that not all the entries are used.
- *
- * If, as an example, the translation tables for the current context
- * start at L1, then the *tables field will contain the L2 and L3
- * tables.
- */
- uint64_t *base_table;
- unsigned int max_base_table_entries;
-
/* Set to true when the translation tables are initialized. */
bool initialized;
};
@@ -67,12 +45,15 @@
uintptr_t max_va_size;
/*
- * Array of all memory regions stored in order of ascending base_va.
- * The list is terminated by the first entry with
- * size == 0 or when all entries are used (as specified by mmap_num).
+ * Pointer to an array with all the memory regions stored in order
+ * of ascending base_va.
*/
struct xlat_mmap_region *mmap;
- unsigned int mmap_num;
+
+ /*
+ * Number of regions stored in the mmap array.
+ */
+ unsigned int mmap_regions;
/*
* Base address for the virtual space on this context.
@@ -118,13 +99,6 @@
#define XLAT_TABLES_ALIGNMENT XLAT_TABLE_SIZE
/*
- * Align the base tables to page boundary. This migh generate larger tables
- * than needed, but it simplifies the code, which is a good trade-off
- * since we have enough memory.
- */
-#define BASE_XLAT_TABLES_ALIGNMENT XLAT_TABLE_SIZE
-
-/*
* Compute the number of entries required at the initial lookup level to
* address the whole virtual address space.
*/
@@ -140,120 +114,59 @@
/*
* Macro to check if the xlat_ctx_tbls part of a context is valid.
*/
-#define XLAT_TABLES_CTX_TBL_VALID(_ctx) ((_ctx)->tbls != NULL)
+#define XLAT_TABLES_CTX_TBL_VALID(_ctx) ((_ctx)->tbls != NULL)
/*
- * Macro to allocate translation tables to be used within a context.
+ * Function to initialize the configuration structure for a
+ * translation context. This function must be called before
+ * the MMU is enabled.
+ *
+ * Arguments:
+ * - cfg: Pointer to a xlat_ctx_cfg structure to initialize.
+ * - region: xlat_addr_region_id_t descriptor indicating the memory
+ * region for the configured context.
+ * - mm: List of memory map regions to add to the
+ * context configuration.
+ * - mm_regions: Number of memory regions in the mm array.
+ * - va_size: Size of the VA space for the current context.
+ *
+ * Return:
+ * - 0 on success or a negative POSIX error otherwise.
*/
-#define XLAT_CREATE_TABLES(_tblset_name, \
- _xlat_tables_count, \
- _virt_addr_space_size, \
- _tables_section) \
- \
- static uint64_t _tblset_name##_base_xlat_table \
- [(XLAT_TABLE_ENTRIES)] \
- __aligned((BASE_XLAT_TABLES_ALIGNMENT)) \
- __section((_tables_section)); \
- \
- static uint64_t _tblset_name##_xlat_tables[(_xlat_tables_count)]\
- [(XLAT_TABLE_ENTRIES)] \
- __aligned((XLAT_TABLES_ALIGNMENT)) \
- __section((_tables_section)); \
- \
- static struct xlat_ctx_tbls _tblset_name##_tbls = { \
- .tables = _tblset_name##_xlat_tables, \
- .tables_num = (_xlat_tables_count), \
- .next_table = 0, \
- .base_table = _tblset_name##_base_xlat_table, \
- .max_base_table_entries = \
- GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size),\
- .initialized = false \
- } \
+int xlat_ctx_cfg_init(struct xlat_ctx_cfg *cfg,
+ xlat_addr_region_id_t region,
+ struct xlat_mmap_region *mm,
+ unsigned int mm_regions,
+ size_t va_size);
/*
- * Macro used to define the xlat_ctx_cfg and xlat_mmap_region array
- * associated with a context.
+ * Initializes the translation context (xlat_ctx) and the xlat_ctx_tbls with
+ * the given xlat_ctx_cfg. The tables are created according to the memory
+ * map description available in the latter and stored in the tables area
+ * pointed to by `tables_ptr`.
+ * Must be called before the MMU is enabled.
+ *
+ * Arguments:
+ * - ctx: Pointer to the xlat_ctx structure to initialize.
+ * - cfg: Pointer to the structure containing the context configuration.
+ * This must have already been initialized via xlat_ctx_cfg_init().
+ * - tbls_ctx: Pointer to a xlat_ctx_tbls structure to configure the
+ * associated table data for the translation context.
+ * - tables_ptr: Pointer to the memory for the translation tables.
+ *               The memory provided must be page aligned and its
+ *               size a multiple of the page size.
+ * - ntables: Number of tables (pages) available at `tables_ptr`.
+ *
+ * Return:
+ * - 0 on success.
+ * - -EALREADY if tbls_ctx is already initialized.
+ * - Negative POSIX error codes on all other errors.
*/
-#define XLAT_REGISTER_VA_SPACE(_ctx_name, _region, _mmap_count, \
- _virt_addr_space_size) \
- COMPILER_ASSERT(((_region) < VA_REGIONS)); \
- COMPILER_ASSERT(((unsigned long)(_virt_addr_space_size) \
- % GRANULE_SIZE) == UL(0)); \
- COMPILER_ASSERT((unsigned long)(_virt_addr_space_size) \
- <= (MAX_VIRT_ADDR_SPACE_SIZE)); \
- \
- static struct xlat_mmap_region _ctx_name##_mmap[(_mmap_count)]; \
- \
- static struct xlat_ctx_cfg _ctx_name##_xlat_ctx_cfg = { \
- .max_va_size = (_virt_addr_space_size), \
- .base_va = 0ULL, \
- .mmap = _ctx_name##_mmap, \
- .mmap_num = (_mmap_count), \
- .max_mapped_va_offset = 0ULL, \
- .max_mapped_pa = 0ULL, \
- .base_level = \
- (GET_XLAT_TABLE_LEVEL_BASE((_virt_addr_space_size))),\
- .region = (_region), \
- .initialized = false \
- }
-
-/*
- * Macro to generate a context and associate the translation table set passed
- * to it by ref.
- */
-#define XLAT_REGISTER_CONTEXT_FULL_SPEC(_ctx_name, _region, _mmap_count,\
- _virt_addr_space_size, \
- _tables_set) \
- XLAT_REGISTER_VA_SPACE(_ctx_name, (_region), \
- (_mmap_count), (_virt_addr_space_size)); \
- \
- static struct xlat_ctx _ctx_name##_xlat_ctx = { \
- .cfg = &(_ctx_name##_xlat_ctx_cfg), \
- .tbls = (_tables_set) \
- }
-
-/*
- * Statically allocate a translation context and associated translation
- * tables. Also initialize them.
- *
- * _ctx_name:
- * Prefix for the translation context variable.
- * E.g. If _ctx_name is 'foo', the variable will be called 'foo_xlat_ctx'.
- * Useful to distinguish multiple contexts from one another.
- *
- * _region:
- * Region mapped by this context (high or low address region).
- * See @xlat_ctx_region_id_t for more info.
- *
- * _mmap_count:
- * Number ofstruct xlat_mmap_region to allocate.
- * Would be defined during the context creation.
- *
- * _xlat_tables_count:
- * Number of non-base tables to allocate at level others than the
- * initial lookup.
- *
- * _virt_addr_space_size:
- * Size (in bytes) of the virtual address space that can be accessed by this
- * context.
- *
- * _section_name:
- * Specify the name of the section where the translation tables have to be
- * placed by the linker.
- */
-#define XLAT_REGISTER_CONTEXT(_ctx_name, _region, _mmap_count, \
- _xlat_tables_count, \
- _virt_addr_space_size, \
- _section_name) \
- XLAT_CREATE_TABLES(_ctx_name, (_xlat_tables_count), \
- (_virt_addr_space_size), \
- (_section_name)); \
- \
- XLAT_REGISTER_CONTEXT_FULL_SPEC(_ctx_name, (_region), \
- (_mmap_count), \
- (_virt_addr_space_size),\
- &(_ctx_name##_tbls))
+int xlat_ctx_init(struct xlat_ctx *ctx,
+ struct xlat_ctx_cfg *cfg,
+ struct xlat_ctx_tbls *tbls_ctx,
+ uint64_t *tables_ptr,
+ unsigned int ntables);
#endif /*__ASSEMBLER__*/
-
#endif /* XLAT_CONTEXTS_H */
diff --git a/lib/xlat/include/xlat_defs.h b/lib/xlat/include/xlat_defs.h
index ca41825..74fa15f 100644
--- a/lib/xlat/include/xlat_defs.h
+++ b/lib/xlat/include/xlat_defs.h
@@ -12,10 +12,6 @@
#include <arch.h>
#include <utils_def.h>
-#define PAGE_SIZE_4KB (1UL << 12)
-#define PAGE_SIZE_16KB (1UL << 14)
-#define PAGE_SIZE_64KB (1UL << 16)
-
/*
* The ARMv8-A architecture allows translation granule sizes of 4KB, 16KB or 64KB.
*
@@ -64,20 +60,15 @@
/*
* In AArch64 state, the MMU may support 4KB, 16KB and 64KB page
- * granularity. For 4KB granularity, a level 0 table descriptor doesn't support
- * block translation. For 16KB, the same thing happens to levels 0 and 1. For
- * 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
+ * granularity. For 4KB granularity (the only one supported by
+ * this library), a level 0 table descriptor doesn't support
+ * block translation. See section D4.3.1 of the ARMv8-A Architecture
* Reference Manual (DDI 0487A.k) for more information.
*
* The define below specifies the first table level that allows block
* descriptors.
*/
-#if PAGE_SIZE == PAGE_SIZE_4KB
-# define MIN_LVL_BLOCK_DESC U(1)
-#elif (PAGE_SIZE == PAGE_SIZE_16KB) || (PAGE_SIZE == PAGE_SIZE_64KB)
-# define MIN_LVL_BLOCK_DESC U(2)
-#endif
-
+#define MIN_LVL_BLOCK_DESC U(1)
#define XLAT_TABLE_LEVEL_MIN U(0)
/* Mask used to know if an address belongs to a high va region. */
@@ -88,22 +79,22 @@
* state.
*
* TCR.TxSZ is calculated as 64 minus the width of said address space.
- * The value of TCR.TxSZ must be in the range 16 to 39 [1] or 48 [2],
- * depending on Small Translation Table Support which means that
- * the virtual address space width must be in the range 48 to 25 or 16 bits.
+ * The value of TCR.TxSZ must be in the range 16 to 48 [1], which means that
+ * the virtual address space width must be in the range 48 to 16 bits.
*
* [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information:
* Page 1730: 'Input address size', 'For all translation stages'.
- * [2] See section 12.2.55 in the ARMv8-A Architecture Reference Manual
+ * and section 12.2.55 in the ARMv8-A Architecture Reference Manual
* (DDI 0487D.a)
*/
-/* Maximum value of TCR_ELx.T(0,1)SZ is 39 */
+/*
+ * Maximum value of TCR_ELx.T(0,1)SZ is 48, for a min VA size of 16 bits.
+ * RMM is only supported with FEAT_TTST implemented.
+ */
#define MIN_VIRT_ADDR_SPACE_SIZE (UL(1) << (UL(64) - TCR_TxSZ_MAX))
-/* Maximum value of TCR_ELx.T(0,1)SZ is 48 */
-#define MIN_VIRT_ADDR_SPACE_SIZE_TTST \
- (UL(1) << (UL(64) - TCR_TxSZ_MAX_TTST))
+/* Minimum value of TCR_ELx.T(0,1)SZ is 16, for a VA of 48 bits */
#define MAX_VIRT_ADDR_SPACE_SIZE (UL(1) << (UL(64) - TCR_TxSZ_MIN))
/*
diff --git a/lib/xlat/include/xlat_tables.h b/lib/xlat/include/xlat_tables.h
index 3caeb8d..3556880 100644
--- a/lib/xlat/include/xlat_tables.h
+++ b/lib/xlat/include/xlat_tables.h
@@ -181,7 +181,7 @@
/*
* Structure containing a table entry and its related information.
*/
-struct xlat_table_entry {
+struct xlat_tbl_info {
uint64_t *table; /* Pointer to the translation table. */
uintptr_t base_va; /* Context base VA for the current entry. */
unsigned int level; /* Table level of the current entry. */
@@ -192,63 +192,23 @@
* Generic translation table APIs.
*****************************************************************************/
-static inline void xlat_write_descriptor(uint64_t *entry, uint64_t desc)
+static inline void xlat_write_tte(uint64_t *entry, uint64_t desc)
{
SCA_WRITE64(entry, desc);
}
-static inline uint64_t xlat_read_descriptor(uint64_t *entry)
+static inline uint64_t xlat_read_tte(uint64_t *entry)
{
return SCA_READ64(entry);
}
/*
- * Initialize translation tables (and mark xlat_ctx_cfg as initialized if
- * not already initialized) associated to the current context.
- *
- * The struct xlat_ctx_cfg of the context might be shared with other
- * contexts that might have already initialized it. This is expected and
- * should not cause any problem.
- *
- * This function assumes that the xlat_ctx_cfg field of the context has been
- * properly configured by previous calls to xlat_mmap_add_region_ctx().
- *
- * This function returns 0 on success or an error code otherwise.
- */
-int xlat_init_tables_ctx(struct xlat_ctx *ctx);
-
-/*
- * Add a memory region with defined base PA and base VA. This function can only
- * be used before marking the xlat_ctx_cfg for the current xlat_ctx as
- * initialized.
- *
- * The region cannot be removed once added.
- *
- * This function returns 0 on success or an error code otherwise.
- */
-int xlat_mmap_add_region_ctx(struct xlat_ctx *ctx,
- struct xlat_mmap_region *mm);
-
-/*
- * Add an array of memory regions with defined base PA and base VA.
- * This function needs to be called before initialiting the xlat_ctx_cfg.
- * Setting the `last` argument to true will initialise the xlat_ctx_cfg.
- *
- * The regions cannot be removed once added.
- *
- * Return 0 on success or a negative error code otherwise.
- */
-int xlat_mmap_add_ctx(struct xlat_ctx *ctx,
- struct xlat_mmap_region *mm,
- bool last);
-
-/*
* Return a table entry structure given a context and a VA.
* The return structure is populated on the retval field.
*
* This function returns 0 on success or a negative error code otherwise.
*/
-int xlat_get_table_from_va(struct xlat_table_entry * const retval,
+int xlat_get_table_from_va(struct xlat_tbl_info * const retval,
const struct xlat_ctx * const ctx,
const uintptr_t va);
@@ -260,7 +220,7 @@
*
* This function returns 0 on success or a negative error code otherwise.
*/
-int xlat_unmap_memory_page(struct xlat_table_entry * const table,
+int xlat_unmap_memory_page(struct xlat_tbl_info * const table,
const uintptr_t va);
/*
@@ -271,7 +231,7 @@
*
* This function returns 0 on success or a negative error code otherwise.
*/
-int xlat_map_memory_page_with_attrs(const struct xlat_table_entry * const table,
+int xlat_map_memory_page_with_attrs(const struct xlat_tbl_info * const table,
const uintptr_t va,
const uintptr_t pa,
const uint64_t attrs);
@@ -281,8 +241,8 @@
* table entry structure and the VA for that descriptor.
*
*/
-uint64_t *xlat_get_pte_from_table(const struct xlat_table_entry * const table,
- const uintptr_t va);
+uint64_t *xlat_get_tte_ptr(const struct xlat_tbl_info * const table,
+ const uintptr_t va);
/*
* Set up the MMU configuration registers for the specified platform parameters.
@@ -301,30 +261,5 @@
/* MMU control */
void xlat_enable_mmu_el2(void);
-/*
- * Returns true if the xlat_ctx_cfg field in the xlat_ctx is initialized.
- */
-bool xlat_ctx_cfg_initialized(const struct xlat_ctx * const ctx);
-
-/*
- * Returns true if the translation tables on the current context are already
- * initialized or false otherwise.
- */
-bool xlat_ctx_tbls_initialized(const struct xlat_ctx * const ctx);
-
-/*
- * Initialize a context dynamically at runtime using the given xlat_ctx_cfg
- * and xlat_ctx_tbls structures.
- *
- * Return 0 if success or a Posix erro code otherwise.
- */
-int xlat_ctx_create_dynamic(struct xlat_ctx *ctx,
- struct xlat_ctx_cfg *cfg,
- struct xlat_ctx_tbls *tbls,
- void *base_tables,
- unsigned int base_level_entries,
- void *tables_ptr,
- unsigned int ntables);
-
#endif /*__ASSEMBLER__*/
#endif /* XLAT_TABLES_H */
diff --git a/lib/xlat/src/xlat_contexts.c b/lib/xlat/src/xlat_contexts.c
new file mode 100644
index 0000000..d5f7c4a
--- /dev/null
+++ b/lib/xlat/src/xlat_contexts.c
@@ -0,0 +1,302 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <utils_def.h>
+#include <xlat_contexts.h>
+#include <xlat_defs.h>
+#include <xlat_tables.h>
+#include <xlat_tables_private.h>
+
+/*
+ * Function that verifies that a memory map array is valid.
+ * Returns:
+ * 0: Success, the memory map array is valid.
+ * -EINVAL: Invalid values were used as arguments.
+ * -EFAULT: A base address or a size is not page aligned.
+ * -ERANGE: The memory limits were surpassed.
+ * -EPERM: A region overlaps another one in an invalid way or the
+ * regions are in an incorrect order.
+ */
+static int validate_mmap_regions(struct xlat_mmap_region *mm,
+ unsigned int mm_regions,
+ uintptr_t ctx_base_va, size_t va_size,
+ xlat_addr_region_id_t region)
+{
+ uintptr_t base_pa;
+ uintptr_t base_va;
+ size_t size;
+ size_t granularity;
+ uintptr_t end_pa, mm_end_pa;
+ uintptr_t end_va, previous_end_va;
+
+ if (mm == NULL) {
+ return -EINVAL;
+ }
+
+ if (mm_regions == 0U) {
+ return -EINVAL;
+ }
+
+ for (unsigned int i = 0U; i < mm_regions; i++) {
+ size = mm[i].size;
+ granularity = mm[i].granularity;
+ base_pa = mm[i].base_pa;
+ base_va = mm[i].base_va;
+ end_pa = base_pa + size - 1UL;
+ end_va = base_va + size - 1UL;
+
+ if (region == VA_LOW_REGION) {
+ if ((base_va & HIGH_REGION_MASK) ||
+ ((base_va + size) & HIGH_REGION_MASK)) {
+ ERROR("%s (%u): Base VA and address space do not match: ",
+ __func__, __LINE__);
+ ERROR("Base va = 0x%lx, Address space = Low region\n",
+ base_va);
+ return -EINVAL;
+ }
+ } else {
+ if (base_va < ctx_base_va) {
+ ERROR("%s (%u): Base VA is not aligned with high region start: ",
+ __func__, __LINE__);
+ ERROR("Base VA = 0x%lx, high region start VA = 0x%lx\n",
+ base_va, ctx_base_va);
+ return -EINVAL;
+ }
+ /*
+ * If this context is handling the high half region of the VA,
+ * adjust the start address of this area by substracting the
+ * start address of the region as the table entries are
+ * relative to the latter. Once ttbr1_el2 is configured, the
+ * MMU will translate the addresses properly.
+ */
+ mm[i].base_va -= ctx_base_va;
+ base_va = mm[i].base_va;
+ end_va = base_va + mm[i].size;
+ }
+
+ if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
+ !IS_PAGE_ALIGNED(size)) {
+ return -EFAULT;
+ }
+
+ if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
+ (granularity != XLAT_BLOCK_SIZE(2U)) &&
+ (granularity != XLAT_BLOCK_SIZE(3U))) {
+ return -EINVAL;
+ }
+
+ /* Check for overflows */
+ if ((base_pa > end_pa) || (base_va > end_va)) {
+ return -ERANGE;
+ }
+
+ /*
+ * end_va is calculated as an offset relative to the base
+ * address for the current context, so compare it against
+ * max_va_size to ensure we are within the allowed range.
+ */
+ if (end_va > va_size) {
+ return -ERANGE;
+ }
+
+ if (end_pa > xlat_arch_get_max_supported_pa()) {
+ return -ERANGE;
+ }
+
+ if (i > 0U) {
+ if (base_va < mm[i - 1U].base_va) {
+ /* Incorrect order */
+ return -EPERM;
+ }
+
+ /*
+ * Check that the PA and the VA do not
+ * overlap with the ones on the previous region.
+ */
+ previous_end_va = mm[i - 1U].base_va +
+ mm[i - 1U].size - 1UL;
+
+ /* No overlaps with VAs of previous regions */
+ if (base_va <= previous_end_va) {
+ return -EPERM;
+ }
+
+ /* No overlaps with PAs of previous regions */
+ for (unsigned int j = 0; j < i; j++) {
+ mm_end_pa = mm[j].base_pa + mm[j].size - 1UL;
+
+ if ((end_pa >= mm[j].base_pa) &&
+ (end_pa <= mm_end_pa)) {
+ return -EPERM;
+ }
+
+ if ((base_pa >= mm[j].base_pa) &&
+ (base_pa <= mm_end_pa)) {
+ return -EPERM;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int add_mmap_to_ctx_cfg(struct xlat_ctx_cfg *cfg,
+ xlat_addr_region_id_t region,
+ struct xlat_mmap_region *mm,
+ unsigned int mm_regions,
+ size_t va_size)
+{
+ int ret;
+ uintptr_t end_pa;
+
+ if (region == VA_LOW_REGION) {
+ /*
+ * Initialize the base_va for the current context if not
+ * initialized yet.
+ *
+ * For the low region, the architecture mandates that
+ * base_va has to be 0.
+ *
+ * Overwriting this field should not be a problem as its value
+ * is expected to be always the same.
+ */
+ cfg->base_va = 0ULL;
+ } else {
+ /*
+ * Initialize the base_va for the current context if not
+ * initialized yet.
+ *
+ * For the high region, the architecture mandates that
+ * base_va has to be 0xFFFF-FFFF-FFFF-FFFF minus the VA space
+ * size plus one.
+ *
+ * Overwriting this field should not be a problem as its value
+ * is expected to be always the same.
+ */
+ cfg->base_va = (ULONG_MAX - va_size + 1ULL);
+ }
+
+ ret = validate_mmap_regions(mm, mm_regions, cfg->base_va,
+ va_size, region);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ /* Adjust the cfg parameters which depend on the mmap regions */
+ cfg->max_mapped_pa = 0ULL;
+ for (unsigned int i = 0U; i < mm_regions; i++) {
+ end_pa = mm[i].base_pa + mm[i].size - 1ULL;
+ if (end_pa > cfg->max_mapped_pa) {
+ cfg->max_mapped_pa = end_pa;
+ }
+ }
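+ /*
+  * validate_mmap_regions() guarantees ascending base_va order, so
+  * the last region determines the maximum mapped VA offset.
+  */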
+ cfg->max_mapped_va_offset = mm[mm_regions - 1U].base_va +
+ mm[mm_regions - 1U].size - 1ULL;
+ cfg->mmap = mm;
+ cfg->mmap_regions = mm_regions;
+
+ return 0;
+}
+
+int xlat_ctx_cfg_init(struct xlat_ctx_cfg *cfg,
+ xlat_addr_region_id_t region,
+ struct xlat_mmap_region *mm,
+ unsigned int mm_regions,
+ size_t va_size)
+{
+ int retval;
+
+ if (cfg == NULL) {
+ return -EINVAL;
+ }
+
+ if (mm == NULL) {
+ return -EINVAL;
+ }
+
+ if (region >= VA_REGIONS) {
+ return -EINVAL;
+ }
+
+ if ((va_size % GRANULE_SIZE) != 0ULL) {
+ return -EINVAL;
+ }
+
+ if ((va_size > MAX_VIRT_ADDR_SPACE_SIZE) ||
+ (va_size < MIN_VIRT_ADDR_SPACE_SIZE)) {
+ return -EINVAL;
+ }
+
+ if (cfg->initialized == true) {
+ return -EALREADY;
+ }
+
+ retval = add_mmap_to_ctx_cfg(cfg, region, mm, mm_regions, va_size);
+
+ if (retval < 0) {
+ return retval;
+ }
+
+ cfg->max_va_size = va_size;
+ cfg->base_level = (GET_XLAT_TABLE_LEVEL_BASE(va_size));
+ cfg->region = region;
+ cfg->initialized = true;
+
+ flush_dcache_range((uintptr_t)cfg, sizeof(struct xlat_ctx_cfg));
+
+ return 0;
+}
+
+int xlat_ctx_init(struct xlat_ctx *ctx,
+ struct xlat_ctx_cfg *cfg,
+ struct xlat_ctx_tbls *tbls_ctx,
+ uint64_t *tables_ptr,
+ unsigned int ntables)
+{
+ if ((ctx == NULL) || (tbls_ctx == NULL) || (cfg == NULL)) {
+ return -EINVAL;
+ }
+
+ if (tables_ptr == NULL || ntables == 0U) {
+ return -EINVAL;
+ }
+
+ if (ALIGNED(tables_ptr, XLAT_TABLES_ALIGNMENT) == false) {
+ return -EINVAL;
+ }
+
+ if (tbls_ctx->initialized == true) {
+ return -EALREADY;
+ }
+
+ if (cfg->initialized == false) {
+ return -EINVAL;
+ }
+
+ /* Add the configuration to the context */
+ ctx->cfg = cfg;
+
+ /* Initialize the tables structure */
+ XLAT_INIT_CTX_TBLS(tbls_ctx, tables_ptr, ntables);
+
+ /* Add the tables to the context */
+ ctx->tbls = tbls_ctx;
+
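+ /*
+  * Clean the cached context structures to memory so that the new
+  * configuration is observable before the MMU is enabled.
+  */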
+ flush_dcache_range((uintptr_t)ctx, sizeof(struct xlat_ctx));
+ flush_dcache_range((uintptr_t)tbls_ctx, sizeof(struct xlat_ctx_tbls));
+ flush_dcache_range((uintptr_t)cfg, sizeof(struct xlat_ctx_cfg));
+
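+ /* Finally, create the translation tables described by the mmap regions */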
+ return xlat_init_tables_ctx(ctx);
+}
diff --git a/lib/xlat/src/xlat_tables_arch.c b/lib/xlat/src/xlat_tables_arch.c
index 1ed6c96..77a44d1 100644
--- a/lib/xlat/src/xlat_tables_arch.c
+++ b/lib/xlat/src/xlat_tables_arch.c
@@ -79,7 +79,7 @@
return -EINVAL;
}
- if (xlat_ctx_cfg_initialized(ctx) == false) {
+ if (ctx->cfg->initialized == false) {
return -EINVAL;
}
@@ -152,7 +152,7 @@
* Set TTBR bits as well and enable CnP bit so as to share page
* tables with all PEs.
*/
- ttbrx = (uint64_t)(void *)ctx_tbls->base_table;
+ ttbrx = (uint64_t)(void *)ctx_tbls->tables;
/*
* The VA region is not common for the HIGH region as it is used
diff --git a/lib/xlat/src/xlat_tables_core.c b/lib/xlat/src/xlat_tables_core.c
index e2c8a33..38443f2 100644
--- a/lib/xlat/src/xlat_tables_core.c
+++ b/lib/xlat/src/xlat_tables_core.c
@@ -8,9 +8,9 @@
#include <arch_features.h>
#include <arch_helpers.h>
+#include <assert.h>
#include <debug.h>
#include <errno.h>
-#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
@@ -51,7 +51,9 @@
static inline uint64_t *xlat_table_get_empty(struct xlat_ctx *ctx)
{
assert(ctx->tbls->next_table < ctx->tbls->tables_num);
- return ctx->tbls->tables[ctx->tbls->next_table++];
+
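+ /* The tables are a flat array; each table is XLAT_TABLE_ENTRIES long */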
+ return &ctx->tbls->tables[XLAT_TABLE_ENTRIES *
+ ctx->tbls->next_table++];
}
/*
@@ -286,7 +288,6 @@
table_idx_va, level);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
-
table_base[table_idx] =
xlat_desc(mm->attr, table_idx_pa, level);
@@ -350,87 +351,6 @@
}
/*
- * Function that verifies that a region can be mapped.
- * Returns:
- * 0: Success, the mapping is allowed.
- * EINVAL: Invalid values were used as arguments.
- * ERANGE: The memory limits were surpassed.
- * ENOMEM: There is not enough memory in the mmap array.
- * EPERM: Region overlaps another one in an invalid way.
- * EALREADY: The context configuration is already marked as initialized.
- */
-static int mmap_add_region_check(const struct xlat_ctx *ctx,
- const struct xlat_mmap_region *mm)
-{
- uintptr_t base_pa = mm->base_pa;
- uintptr_t base_va = mm->base_va;
- size_t size = mm->size;
- size_t granularity = mm->granularity;
- uintptr_t end_pa = base_pa + size - 1UL;
- uintptr_t end_va = base_va + size - 1UL;
- unsigned int index;
- struct xlat_ctx_cfg *ctx_cfg = ctx->cfg;
-
- if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
- !IS_PAGE_ALIGNED(size)) {
- return -EFAULT;
- }
-
- if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
- (granularity != XLAT_BLOCK_SIZE(2U)) &&
- (granularity != XLAT_BLOCK_SIZE(3U))) {
- return -EINVAL;
- }
-
- /* Check for overflows */
- if ((base_pa > end_pa) || (base_va > end_va)) {
- return -ERANGE;
- }
-
- /*
- * end_va is calculated as an offset with regards to the base address
- * for the current context, so compare it against max_va_size to ensure
- * we are within the allowed range.
- */
- if (end_va > ctx_cfg->max_va_size) {
- return -ERANGE;
- }
-
- if (end_pa > xlat_arch_get_max_supported_pa()) {
- return -ERANGE;
- }
-
- /* Check that there is space in the ctx->mmap array */
- if (ctx_cfg->mmap[ctx_cfg->mmap_num - 1U].size != 0UL) {
- return -ENOMEM;
- }
-
- /* Check for PAs and VAs overlaps with all other regions in this context */
- index = 0U;
- while ((index < ctx_cfg->mmap_num) &&
- (ctx_cfg->mmap[index].size != 0UL)) {
- uintptr_t mm_cursor_end_va = ctx_cfg->mmap[index].base_va +
- ctx_cfg->mmap[index].size - 1UL;
-
- unsigned long long mm_cursor_end_pa =
- ctx_cfg->mmap[index].base_pa
- + ctx_cfg->mmap[index].size - 1UL;
-
- bool separated_pa = (end_pa < ctx_cfg->mmap[index].base_pa) ||
- (base_pa > mm_cursor_end_pa);
- bool separated_va = (end_va < ctx_cfg->mmap[index].base_va) ||
- (base_va > mm_cursor_end_va);
-
- if (!separated_va || !separated_pa) {
- return -EPERM;
- }
- ++index;
- }
-
- return 0;
-}
-
-/*
* Returns a block/page table descriptor for the given level and attributes.
*/
uint64_t xlat_desc(uint64_t attr, uintptr_t addr_pa, unsigned int level)
@@ -558,277 +478,14 @@
return desc;
}
-/*****************************************************************************
- * Public part of the core translation library.
- ****************************************************************************/
-
-/*
- * Add a memory region with defined base PA and base VA. This function can only
- * be used before marking the xlat_ctx_cfg for the current xlat_ctx as
- * initialized.
- *
- * The region cannot be removed once added.
- *
- * This function returns 0 on success or an error code otherwise.
- */
-int xlat_mmap_add_region_ctx(struct xlat_ctx *ctx,
- struct xlat_mmap_region *mm)
-{
- unsigned int mm_last_idx = 0U;
- unsigned int mm_cursor_idx = 0U;
- uintptr_t end_pa;
- uintptr_t end_va;
- struct xlat_ctx_cfg *ctx_cfg;
- struct xlat_ctx_tbls *ctx_tbls;
- int ret;
-
- if (ctx == NULL) {
- return -EINVAL;
- }
-
- ctx_cfg = ctx->cfg;
- ctx_tbls = ctx->tbls;
-
- if (ctx_cfg == NULL || ctx_tbls == NULL) {
- return -EINVAL;
- }
-
- if (mm == NULL) {
- return -EINVAL;
- }
-
- /* The context data cannot be initialized */
- if (xlat_ctx_cfg_initialized(ctx) == true) {
- return -EINVAL;
- }
-
- /* Memory regions must be added before initializing the xlat tables. */
- assert(ctx_tbls->initialized == false);
-
- /* Ignore empty regions */
- if (mm->size == 0UL) {
- return 0;
- }
-
- if (ctx_cfg->region == VA_LOW_REGION) {
- /*
- * Initialize the base_va for the current context if not
- * initialized yet.
- *
- * For the low region, the architecture mandates that
- * base_va has to be 0.
- *
- * Overwriting this field should not be a problem as its value
- * is expected to be always the same.
- */
- ctx_cfg->base_va = 0ULL;
-
- if ((mm->base_va & HIGH_REGION_MASK) ||
- ((mm->base_va + mm->size) & HIGH_REGION_MASK)) {
- ERROR("%s (%u): Base VA and address space do not match: ",
- __func__, __LINE__);
- ERROR("Base va = 0x%lx, Address space = Low region\n",
- mm->base_va);
- return -EINVAL;
- }
- } else {
- /*
- * Initialize the base_va for the current context if not
- * initialized yet.
- *
- * For the high region, the architecture mandates that
- * base_va has to be 0xFFFF-FFFF-FFFF-FFFF minus the VA space
- * size plus one.
- *
- * Overwriting this field should not be a problem as its value
- * is expected to be always the same.
- */
- ctx_cfg->base_va = (ULONG_MAX - ctx_cfg->max_va_size + 1ULL);
-
- if (mm->base_va < ctx_cfg->base_va) {
- ERROR("%s (%u): Base VA is not aligned with the high region start: ",
- __func__, __LINE__);
- ERROR("Base VA = 0x%lx, high region start VA = 0x%lx\n",
- mm->base_va, ctx_cfg->base_va);
- return -EINVAL;
- }
-
- /*
- * If this context is handling the high half region of the VA,
- * adjust the start address of this area by substracting the
- * start address of the region as the table entries are
- * relative to the latter. Once ttbr1_el2 is configured, the
- * MMU will translate the addresses properly.
- */
- mm->base_va -= ctx_cfg->base_va;
- }
-
- end_pa = mm->base_pa + mm->size - 1UL;
- end_va = mm->base_va + mm->size - 1UL;
-
- ret = mmap_add_region_check(ctx, mm);
- if (ret != 0) {
- ERROR("%s (%u): mmap_add_region_check() failed. error %d\n",
- __func__, __LINE__, ret);
- return ret;
- }
-
- /*
- * Find correct place in mmap to insert new region.
- * Overlapping is not allowed.
- */
- while (((ctx_cfg->mmap[mm_cursor_idx].base_va) < mm->base_va)
- && (ctx_cfg->mmap[mm_cursor_idx].size != 0UL)
- && (mm_cursor_idx < ctx_cfg->mmap_num)) {
- ++mm_cursor_idx;
- }
-
- /*
- * Find the last entry marker in the mmap
- */
- while ((mm_last_idx < ctx_cfg->mmap_num) &&
- (ctx_cfg->mmap[mm_last_idx].size != 0UL)) {
- ++mm_last_idx;
- }
-
- /*
- * Check if we have enough space in the memory mapping table.
- * This shouldn't happen as we have checked in mmap_add_region_check
- * that there is free space.
- */
- assert(ctx_cfg->mmap[mm_last_idx].size == 0UL);
-
- /*
- * Make room for new region by moving other regions up by one place.
- */
- (void)memmove((void *)(&ctx_cfg->mmap[mm_cursor_idx + 1U]),
- (void *)(&ctx_cfg->mmap[mm_cursor_idx]),
- sizeof(struct xlat_mmap_region) *
- (mm_last_idx - mm_cursor_idx));
-
- /* Store the memory mapping information into the context. */
- (void)memcpy((void *)(&ctx_cfg->mmap[mm_cursor_idx]), (void *)mm,
- sizeof(struct xlat_mmap_region));
-
- if (end_pa > ctx_cfg->max_mapped_pa) {
- ctx_cfg->max_mapped_pa = end_pa;
- }
-
- if (end_va > ctx_cfg->max_mapped_va_offset) {
- ctx_cfg->max_mapped_va_offset = end_va;
- }
-
- return 0;
-}
-
-/*
- * Add an array of memory regions with defined base PA and base VA.
- * This function needs to be called before initialiting the xlat_ctx_cfg.
- * Setting the `last` argument to true will initialise the xlat_ctx_cfg.
- *
- * The regions cannot be removed once added.
- *
- * Return 0 on success or a negative error code otherwise.
- */
-int xlat_mmap_add_ctx(struct xlat_ctx *ctx,
- struct xlat_mmap_region *mm,
- bool last)
-{
- if ((ctx == NULL) || (mm == NULL)) {
- return -EINVAL;
- }
-
- struct xlat_mmap_region *mm_cursor = mm;
-
- while (mm_cursor->size != 0UL) {
- int retval;
-
- retval = xlat_mmap_add_region_ctx(ctx, mm_cursor);
- if (retval != 0) {
- /*
- * In case of error, stop an return.
- * Note, the context might be in an invalid
- * state and it will need to be restarted.
- */
- return retval;
- }
- mm_cursor++;
- }
-
- if (last) {
- /*
- * Mark the configuration part of the context as initialized.
- * From this point on, no more memory mapping areas can be
- * added to this context (or any other sharing the same
- * configuration).
- */
- ctx->cfg->initialized = true;
- flush_dcache_range((uintptr_t)(void *)ctx->cfg,
- sizeof(struct xlat_ctx_cfg));
-
- }
-
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
- VERBOSE("Runtime mapings");
- if (ctx->cfg->region == VA_LOW_REGION) {
- VERBOSE("(Low Region):\n");
- } else {
- VERBOSE("(High Region):\n");
- }
-
- for (unsigned int i = 0U; i < ctx->cfg->mmap_num; i++) {
- VERBOSE("\tRegion: 0x%lx - 0x%lx has attributes 0x%lx\n",
- ctx->cfg->mmap[i].base_va,
- ctx->cfg->mmap[i].base_va + ctx->cfg->mmap[i].size - 1U,
- ctx->cfg->mmap[i].attr);
- }
-#endif /* LOG_LEVEL_VERBOSE */
-
- return 0;
-}
-
-/*
- * Initialize translation tables (and mark xlat_ctx_cfg as initialized if
- * not already initialized) associated to the current context.
- *
- * The struct xlat_ctx_cfg of the context might be shared with other
- * contexts that might have already initialized it. This is expected and
- * should not cause any problem.
- *
- * This function assumes that the xlat_ctx_cfg field of the context has been
- * properly configured by previous calls to xlat_mmap_add_region_ctx().
- *
- * This function returns 0 on success or an error code otherwise.
- */
int xlat_init_tables_ctx(struct xlat_ctx *ctx)
{
struct xlat_ctx_cfg *ctx_cfg;
struct xlat_ctx_tbls *ctx_tbls;
- unsigned int index;
-
- if (ctx == NULL) {
- return -EINVAL;
- }
ctx_cfg = ctx->cfg;
ctx_tbls = ctx->tbls;
- if (ctx_cfg == NULL || ctx_tbls == NULL) {
- return -EINVAL;
- }
-
- if (xlat_ctx_tbls_initialized(ctx)) {
- VERBOSE("%s (%u): Translation tables already initialized\n",
- __func__, __LINE__);
- return -EALREADY;
- }
-
- if (!xlat_ctx_cfg_initialized(ctx)) {
- VERBOSE("%s (%u): Translation context configuration not initialized\n",
- __func__, __LINE__);
- return -EINVAL;
- }
-
if (is_mmu_enabled() == true) {
ERROR("%s (%u): MMU is already enabled\n", __func__, __LINE__);
return -EINVAL;
@@ -840,42 +497,37 @@
* All tables must be zeroed/initialized before mapping any region
* as they are allocated outside the .bss area.
*/
- for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++) {
- ctx_tbls->base_table[i] = INVALID_DESC;
- }
-
for (unsigned int j = 0; j < ctx_tbls->tables_num; j++) {
for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++) {
- ctx_tbls->tables[j][i] = INVALID_DESC;
+ ctx_tbls->tables[(j * XLAT_TABLE_ENTRIES) + i] =
+ INVALID_DESC;
}
}
- index = 0U;
- while ((index < ctx_cfg->mmap_num) &&
- (ctx_cfg->mmap[index].size != 0UL)) {
+ /*
+ * Use the first table as the base table and reserve it by
+ * advancing the index of the next available table.
+ */
+ ctx_tbls->next_table++;
+ for (unsigned int i = 0U; i < ctx->cfg->mmap_regions; i++) {
uintptr_t end_va = xlat_tables_map_region(ctx,
- &ctx_cfg->mmap[index],
- 0U,
- ctx_tbls->base_table,
- ctx_tbls->max_base_table_entries,
+ &ctx_cfg->mmap[i], 0U,
+ ctx_tbls->tables,
+ XLAT_TABLE_ENTRIES,
ctx_cfg->base_level);
- if (end_va != (ctx_cfg->mmap[index].base_va +
- ctx_cfg->mmap[index].size - 1UL)) {
+ if (end_va != (ctx_cfg->mmap[i].base_va +
+ ctx_cfg->mmap[i].size - 1UL)) {
ERROR("%s (%u): Not enough memory to map region: "
" VA:0x%lx PA:0x%lx size:0x%zx attr:0x%lx\n",
- __func__, __LINE__, ctx_cfg->mmap[index].base_va,
- ctx_cfg->mmap[index].base_pa,
- ctx_cfg->mmap[index].size,
- ctx_cfg->mmap[index].attr);
+ __func__, __LINE__, ctx_cfg->mmap[i].base_va,
+ ctx_cfg->mmap[i].base_pa,
+ ctx_cfg->mmap[i].size,
+ ctx_cfg->mmap[i].attr);
return -ENOMEM;
}
-
- ++index;
}
/* Flush the cache as a good measure */
- flush_dcache_range((uintptr_t)(void *)ctx_tbls->base_table,
- sizeof(uint64_t) * XLAT_TABLE_ENTRIES);
flush_dcache_range((uintptr_t)(void *)ctx_tbls->tables,
sizeof(uint64_t) * (unsigned long)ctx_tbls->tables_num
* XLAT_TABLE_ENTRIES);
@@ -884,67 +536,8 @@
flush_dcache_range((uintptr_t)(void *)ctx_tbls,
sizeof(struct xlat_ctx_tbls));
- flush_dcache_range((uintptr_t)(void *)ctx, sizeof(struct xlat_ctx));
xlat_tables_print(ctx);
return 0;
}
-
-/*
- * Initialize a context dynamically at runtime using the given xlat_ctx_cfg
- * and xlat_ctx_tbls structures.
- *
- * Return 0 if success or a Posix erro code otherwise.
- */
-int xlat_ctx_create_dynamic(struct xlat_ctx *ctx,
- struct xlat_ctx_cfg *cfg,
- struct xlat_ctx_tbls *tbls,
- void *base_tables,
- unsigned int base_level_entries,
- void *tables_ptr,
- unsigned int ntables)
-{
- if (ctx == NULL) {
- return -EINVAL;
- }
-
- if (XLAT_TABLES_CTX_CFG_VALID(ctx) &&
- XLAT_TABLES_CTX_TBL_VALID(ctx)) {
- return -EALREADY;
- }
-
- /* Add the configuration to the context */
- XLAT_SETUP_CTX_CFG(ctx, cfg);
-
- /* Initialize the tables structure */
- XLAT_INIT_CTX_TBLS(tbls, tables_ptr, ntables,
- base_tables, base_level_entries);
-
- /* Add the tables to the context */
- XLAT_SETUP_CTX_TBLS(ctx, tbls);
-
- return 0;
-}
-
-/*
- * Returns true if the context is already initialized and false otherwise.
- * This function only takes into account whether xlat_ctx_cfg is initialized.
- */
-bool xlat_ctx_cfg_initialized(const struct xlat_ctx * const ctx)
-{
- assert(ctx != NULL);
- assert(ctx->cfg != NULL);
- return ctx->cfg->initialized;
-}
-
-/*
- * Returns true if the translation tables on the current context are already
- * initialized or false otherwise.
- */
-bool xlat_ctx_tbls_initialized(const struct xlat_ctx * const ctx)
-{
- assert(ctx != NULL);
- assert(ctx->tbls != NULL);
- return ctx->tbls->initialized;
-}
diff --git a/lib/xlat/src/xlat_tables_private.h b/lib/xlat/src/xlat_tables_private.h
index 79cf8a8..f40513e 100644
--- a/lib/xlat/src/xlat_tables_private.h
+++ b/lib/xlat/src/xlat_tables_private.h
@@ -12,6 +12,16 @@
#include <stdbool.h>
#include <xlat_contexts.h>
+/*
+ * Initialize translation tables associated to the current context.
+ *
+ * This function assumes that the xlat_ctx_cfg field of the context has been
+ * properly configured by previous calls to xlat_ctx_cfg_init().
+ *
+ * This function returns 0 on success or an error code otherwise.
+ */
+int xlat_init_tables_ctx(struct xlat_ctx *ctx);
+
/* Determine the physical address space encoded in the 'attr' parameter. */
uint64_t xlat_arch_get_pas(uint64_t attr);
@@ -72,55 +82,23 @@
#define XLAT_GET_NG_HINT() (LOWER_ATTRS(NG_HINT))
/*
- * Set up the xlat_ctx_cfg field of a given context.
- * This macro doesn't check parameters.
- *
- * _ctx:
- * Pointer to xlat_ctx.
- *
- * _cfg:
- * reference to xlat_ctx_cfg that needs to be set to _ctx.
- */
-#define XLAT_SETUP_CTX_CFG(_ctx, _cfg) (_ctx)->cfg = (_cfg)
-
-/*
- * Set up the xlat_ctx_tbls field of a given context.
- * This macro doesn't check parameters.
- *
- * _ctx:
- * Context where to add the configuration strucutre.
- *
- * _tlbs:
- * Reference to the xlat_ctx_tlbs structure where to add to the context.
- */
-#define XLAT_SETUP_CTX_TBLS(_ctx, _tbls) (_ctx)->tbls = (_tbls)
-
-/*
* Initialize an existing xlat_ctx_tbls structure with the given parameters
* This macro doesn't check parameters.
*
* _tlbs:
- * Pointer to xlat_ctx.
+ * Pointer to the xlat_ctx_tbls structure.
*
* _tables:
- * pointer to non-base xlat_ctx_tbls.
+ * pointer to a translation table array containing all the translation
+ * tables.
*
* _tnum:
- * Maximum number of intermediate tables that can fit in the _tables area.
- *
- * _btables:
- * pointer to base xlat_ctx_tbls.
- *
- * _bt_entries:
- * Maximum number of entries available on the base table.
+ * Maximum number of tables that can fit in the _tables area.
*/
-#define XLAT_INIT_CTX_TBLS(_tbls, _tables, _tnum, \
- _btables, _bt_entries) \
+#define XLAT_INIT_CTX_TBLS(_tbls, _tables, _tnum) \
{ \
(_tbls)->tables = (_tables); \
(_tbls)->tables_num = (_tnum); \
- (_tbls)->base_table = (_btables); \
- (_tbls)->max_base_table_entries = (_bt_entries); \
(_tbls)->next_table = 0U; \
(_tbls)->initialized = false; \
}
diff --git a/lib/xlat/src/xlat_tables_utils.c b/lib/xlat/src/xlat_tables_utils.c
index 93cda40..4c67cd3 100644
--- a/lib/xlat/src/xlat_tables_utils.c
+++ b/lib/xlat/src/xlat_tables_utils.c
@@ -7,6 +7,7 @@
/* This file is derived from xlat_table_v2 library in TF-A project */
#include <arch_helpers.h>
+#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <stdbool.h>
@@ -40,7 +41,7 @@
{
VERBOSE("mmap:\n");
- for (unsigned int i = 0U; i < ctx->cfg->mmap_num; i++) {
+ for (unsigned int i = 0U; i < ctx->cfg->mmap_regions; i++) {
uintptr_t base_va;
base_va = ((ctx->cfg->region == VA_LOW_REGION) ?
@@ -108,9 +109,10 @@
* Recursive function that reads the translation tables passed as an argument
* and prints their status.
*/
-static void xlat_tables_print_internal(struct xlat_ctx *ctx, uintptr_t table_base_va,
- const uint64_t *table_base, unsigned int table_entries,
- unsigned int level)
+static void xlat_tables_print_internal(struct xlat_ctx *ctx,
+ uintptr_t table_base_va,
+ const uint64_t *table_base,
+ unsigned int level)
{
uint64_t *addr_inner;
unsigned int invalid_row_count;
@@ -140,7 +142,7 @@
*/
invalid_row_count = 0U;
- while (table_idx < table_entries) {
+ while (table_idx < XLAT_TABLE_ENTRIES) {
uint64_t desc;
desc = table_base[table_idx];
@@ -184,8 +186,7 @@
/* FIXME: Recursion. */
xlat_tables_print_internal(ctx, table_idx_va,
- addr_inner, XLAT_TABLE_ENTRIES,
- level + 1U);
+ addr_inner, level + 1U);
} else {
VERBOSE("%sVA:0x%lx PA:0x%lx size:0x%zx ",
level_spacers[level], table_idx_va,
@@ -224,7 +225,7 @@
VERBOSE(" Max allowed PA: 0x%lx\n", xlat_arch_get_max_supported_pa());
VERBOSE(" Max allowed VA: 0x%lx\n", max_allowed_va);
VERBOSE(" Max mapped PA: 0x%lx", ctx_cfg->max_mapped_pa);
- for (unsigned int i = 0U; i < ctx_cfg->mmap_num; i++) {
+ for (unsigned int i = 0U; i < ctx->cfg->mmap_regions; i++) {
if (ctx_cfg->mmap[i].attr == MT_TRANSIENT) {
/*
* If there is a transient region on this context, we
@@ -239,16 +240,13 @@
VERBOSE(" Max mapped VA: 0x%lx\n", max_mapped_va_offset);
VERBOSE(" Initial lookup level: %u\n", ctx_cfg->base_level);
- VERBOSE(" Entries @initial lookup level: %u\n",
- ctx->tbls->max_base_table_entries);
used_page_tables = ctx->tbls->next_table;
VERBOSE(" Used %d tables out of %d (spare: %d)\n",
used_page_tables, ctx->tbls->tables_num,
ctx->tbls->tables_num - used_page_tables);
- xlat_tables_print_internal(ctx, 0U, ctx->tbls->base_table,
- ctx->tbls->max_base_table_entries,
+ xlat_tables_print_internal(ctx, 0U, ctx->tbls->tables,
ctx_cfg->base_level);
}
@@ -256,19 +254,18 @@
/*
* Do a translation table walk to find the last level table that maps
- * virtual_addr.
+ * va_offset relative to the context's base_va.
*
* On success, return the address of the last level table within the
* translation table. Its lookup level is stored in '*out_level'.
* On error, return NULL.
*/
-static uint64_t *find_xlat_last_table(uintptr_t virtual_addr,
+static uint64_t *find_xlat_last_table(uintptr_t va_offset,
const struct xlat_ctx * const ctx,
unsigned int * const out_level)
{
unsigned int start_level;
uint64_t *ret_table;
- unsigned int entries;
struct xlat_ctx_tbls *ctx_tbls;
struct xlat_ctx_cfg *ctx_cfg;
@@ -281,8 +278,7 @@
ctx_tbls = ctx->tbls;
ctx_cfg = ctx->cfg;
start_level = ctx_cfg->base_level;
- ret_table = ctx_tbls->base_table;
- entries = ctx_tbls->max_base_table_entries;
+ ret_table = ctx_tbls->tables;
for (unsigned int level = start_level;
level <= XLAT_TABLE_LEVEL_MAX;
@@ -291,10 +287,10 @@
uint64_t desc;
uint64_t desc_type;
- idx = XLAT_TABLE_IDX(virtual_addr, level);
- if (idx >= entries) {
- WARN("Missing xlat table entry at address 0x%lx\n",
- virtual_addr);
+ idx = XLAT_TABLE_IDX(va_offset, level);
+ if (idx >= XLAT_TABLE_ENTRIES) {
+ WARN("Missing TTE at address 0x%lx\n",
+ va_offset + ctx_cfg->base_va);
return NULL;
}
@@ -312,7 +308,6 @@
}
ret_table = (uint64_t *)(void *)(desc & TABLE_ADDR_MASK);
- entries = XLAT_TABLE_ENTRIES;
}
/*
@@ -328,27 +323,26 @@
****************************************************************************/
/*
- * Function to unmap a physical memory page from the descriptor entry and
- * VA given.
+ * Function to unmap a physical memory page from the TTE and VA given.
* This function implements the "Break" part of the Break-Before-Make semantics
* needed by the Armv8.x architecture in order to update the page descriptors.
*
* This function returns 0 on success or an error code otherwise.
*
* For simplicity, this function will not take into consideration holes on the
- * table pointed by entry, as long as va belongs to the VA space owned by the
+ * table pointed to by the TTE, as long as va belongs to the VA space owned by the
* context.
*/
-int xlat_unmap_memory_page(struct xlat_table_entry * const table,
+int xlat_unmap_memory_page(struct xlat_tbl_info * const table,
const uintptr_t va)
{
- uint64_t *entry;
+ uint64_t *tte;
assert(table != NULL);
- entry = xlat_get_pte_from_table(table, va);
+ tte = xlat_get_tte_ptr(table, va);
- if (entry == NULL) {
+ if (tte == NULL) {
return -EFAULT;
}
@@ -356,7 +350,7 @@
* No need to perform any checks on this page descriptor as it is going
* to be made invalid anyway.
*/
- xlat_write_descriptor(entry, INVALID_DESC);
+ xlat_write_tte(tte, INVALID_DESC);
/* Invalidate any cached copy of this mapping in the TLBs. */
xlat_arch_tlbi_va(va);
@@ -368,35 +362,35 @@
}
/*
- * Function to map a physical memory page from the descriptor table entry
- * and VA given. This function implements the "Make" part of the
- * Break-Before-Make semantics needed by the armv8.x architecture in order
- * to update the page descriptors.
+ * Function to map a physical memory page from the TTE and VA given.
+ * This function implements the "Make" part of the Break-Before-Make
+ * semantics needed by the Armv8.x architecture in order to update the page
+ * descriptors.
*
 * This function returns 0 on success or an error code otherwise.
*
* For simplicity, this function will not take into consideration holes on the
- * table pointed by entry, as long as va belongs to the VA space owned by the
+ * table pointed to by the TTE, as long as va belongs to the VA space owned by the
* context.
*/
-int xlat_map_memory_page_with_attrs(const struct xlat_table_entry * const table,
+int xlat_map_memory_page_with_attrs(const struct xlat_tbl_info * const table,
const uintptr_t va,
const uintptr_t pa,
const uint64_t attrs)
{
- uint64_t desc;
- uint64_t *desc_ptr;
+ uint64_t tte;
+ uint64_t *tte_ptr;
assert(table != NULL);
- desc_ptr = xlat_get_pte_from_table(table, va);
+ tte_ptr = xlat_get_tte_ptr(table, va);
- if (desc_ptr == NULL) {
+ if (tte_ptr == NULL) {
return -EFAULT;
}
/* This function must only be called on invalid descriptors */
- if (xlat_read_descriptor(desc_ptr) != INVALID_DESC) {
+ if (xlat_read_tte(tte_ptr) != INVALID_DESC) {
return -EFAULT;
}
@@ -406,9 +400,9 @@
}
/* Generate the new descriptor */
- desc = xlat_desc(attrs, pa, table->level);
+ tte = xlat_desc(attrs, pa, table->level);
- xlat_write_descriptor(desc_ptr, desc);
+ xlat_write_tte(tte_ptr, tte);
/* Ensure the translation table write has drained into memory */
dsb(ishst);
@@ -418,12 +412,12 @@
}
/*
- * Return a table entry structure given a context and a VA.
+ * Return a TTE info structure given a context and a VA.
 * The returned structure is populated through the 'retval' argument.
*
* This function returns 0 on success or a Linux error code otherwise.
*/
-int xlat_get_table_from_va(struct xlat_table_entry * const retval,
+int xlat_get_table_from_va(struct xlat_tbl_info * const retval,
const struct xlat_ctx * const ctx,
const uintptr_t va)
{
@@ -463,7 +457,8 @@
/* Maximum number of entries used by this table. */
if (level == ctx_cfg->base_level) {
- retval->entries = ctx->tbls->max_base_table_entries;
+ retval->entries = GET_NUM_BASE_LEVEL_ENTRIES(
+ ctx->cfg->max_mapped_va_offset);
} else {
retval->entries = XLAT_TABLE_ENTRIES;
}
@@ -476,8 +471,8 @@
}
/*
- * This function finds the descriptor entry on a table given the corresponding
- * table entry structure and the VA for that descriptor.
+ * This function finds the TTE in a table given the corresponding
+ * TTE info structure and the VA for that TTE.
*
 * If va is not mapped by the table pointed to by 'entry', it returns NULL.
*
@@ -486,8 +481,8 @@
* on the table pointed by entry either because the address is not mapped by
* the caller or left as INVALID_DESC for future dynamic mapping.
*/
-uint64_t *xlat_get_pte_from_table(const struct xlat_table_entry * const entry,
- const uintptr_t va)
+uint64_t *xlat_get_tte_ptr(const struct xlat_tbl_info * const entry,
+ const uintptr_t va)
{
unsigned int index;
uint64_t *table;
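
As an illustration of how the renamed primitives compose into a full
Break-Before-Make cycle, consider the minimal sketch below. The
remap_page() helper and the <xlat_tables.h> include are illustrative
only; the four library calls and the MT_RW_DATA | MT_REALM attributes
are the ones used by this patch.

    #include <xlat_tables.h>

    static int remap_page(struct xlat_ctx *ctx, uintptr_t va, uintptr_t new_pa)
    {
        struct xlat_tbl_info info;
        int ret;

        /* Locate the last level table that maps 'va' in this context. */
        ret = xlat_get_table_from_va(&info, ctx, va);
        if (ret != 0) {
            return ret;
        }

        /* "Break": invalidate the TTE and flush it from the TLBs. */
        ret = xlat_unmap_memory_page(&info, va);
        if (ret != 0) {
            return ret;
        }

        /* "Make": write a new descriptor mapping 'va' to 'new_pa'. */
        return xlat_map_memory_page_with_attrs(&info, va, new_pa,
                                               MT_RW_DATA | MT_REALM);
    }
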
diff --git a/plat/common/CMakeLists.txt b/plat/common/CMakeLists.txt
index d59b130..3b991a2 100644
--- a/plat/common/CMakeLists.txt
+++ b/plat/common/CMakeLists.txt
@@ -22,21 +22,22 @@
TYPE STRING)
#
-# PLAT_CMN_MAX_MMAP_REGIONS is set a default value and in case when there are
-# not enough mmap regions allocated, adding regions to the xlat tables will
-# fail.
+# Number of extra mmap regions to be allocated for a given platform.
+# This is allowed to be 0. If a platform passes more memory regions
+# than PLAT_CMN_EXTRA_MMAP_REGIONS, the platform setup will fail when
+# trying to initialize the translation context.
#
arm_config_option(
- NAME PLAT_CMN_MAX_MMAP_REGIONS
- HELP "Maximum number of static regions to be mapped in xlat tables"
- DEFAULT 0x5
+ NAME PLAT_CMN_EXTRA_MMAP_REGIONS
+ HELP "Extra platform mmap regions that need to be mapped in S1 xlat tables"
+ DEFAULT 0
TYPE STRING)
target_compile_definitions(rmm-plat-common
- PUBLIC "PLAT_CMN_CTX_MAX_XLAT_TABLES=U(${PLAT_CMN_CTX_MAX_XLAT_TABLES})")
+ PRIVATE "PLAT_CMN_CTX_MAX_XLAT_TABLES=U(${PLAT_CMN_CTX_MAX_XLAT_TABLES})")
target_compile_definitions(rmm-plat-common
- PUBLIC "PLAT_CMN_MAX_MMAP_REGIONS=U(${PLAT_CMN_MAX_MMAP_REGIONS})")
+ PRIVATE "PLAT_CMN_EXTRA_MMAP_REGIONS=U(${PLAT_CMN_EXTRA_MMAP_REGIONS})")
target_include_directories(rmm-plat-common
PUBLIC "include")
diff --git a/plat/common/include/plat_common.h b/plat/common/include/plat_common.h
index c458248..9f00f72 100644
--- a/plat/common/include/plat_common.h
+++ b/plat/common/include/plat_common.h
@@ -11,7 +11,8 @@
int plat_cmn_setup(unsigned long x0, unsigned long x1,
unsigned long x2, unsigned long x3,
- struct xlat_mmap_region *plat_regions);
+ struct xlat_mmap_region *plat_regions,
+ unsigned int nregions);
int plat_cmn_warmboot_setup(void);
#endif /* PLAT_COMMON_H */
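
For a platform that needs one extra device mapping, the new contract
could be exercised as sketched below. PLAT_UART_BASE, PLAT_UART_SIZE
and the MT_DEVICE attribute are placeholders, not names from this
patch; the base_va/base_pa/size/attr fields are the ones this patch
relies on. Note that PLAT_CMN_EXTRA_MMAP_REGIONS must be configured
to at least 1 for this to work, otherwise plat_cmn_setup() returns
-ERANGE.

    /* One hypothetical extra device region (e.g. a UART). */
    static struct xlat_mmap_region plat_regions[] = {
        {
            .base_va = PLAT_UART_BASE,
            .base_pa = PLAT_UART_BASE,
            .size = PLAT_UART_SIZE,
            .attr = MT_DEVICE | MT_REALM
        }
    };

    /* ...from the platform setup hook: */
    ret = plat_cmn_setup(x0, x1, x2, x3, plat_regions, 1U);
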
diff --git a/plat/common/src/plat_common_init.c b/plat/common/src/plat_common_init.c
index 19444cc..03c2fa5 100644
--- a/plat/common/src/plat_common_init.c
+++ b/plat/common/src/plat_common_init.c
@@ -8,11 +8,13 @@
#include <buffer.h>
#include <cpuid.h>
#include <debug.h>
+#include <errno.h>
#include <gic.h>
#include <import_sym.h>
#include <rmm_el3_ifc.h>
#include <sizes.h>
#include <stdint.h>
+#include <string.h>
#include <xlat_contexts.h>
#include <xlat_tables.h>
@@ -29,6 +31,7 @@
* underflow by RMM.
*/
#define RMM_SHARED_BUFFER_START (RMM_RW_END + SZ_4K)
+
/*
* Memory map REGIONS used for the RMM runtime (static mappings)
*/
@@ -61,11 +64,24 @@
0U, \
MT_RW_DATA | MT_REALM)
+/* Number of common memory mapping regions */
+#define COMMON_REGIONS (4U)
-XLAT_REGISTER_CONTEXT(runtime, VA_LOW_REGION, PLAT_CMN_MAX_MMAP_REGIONS,
- PLAT_CMN_CTX_MAX_XLAT_TABLES,
- VIRT_ADDR_SPACE_SIZE,
- "xlat_static_tables");
+/* Total number of memory mapping regions */
+#define TOTAL_MMAP_REGIONS (COMMON_REGIONS + PLAT_CMN_EXTRA_MMAP_REGIONS)
+
+/* Memory mapping regions for the system runtime */
+static struct xlat_mmap_region static_regions[TOTAL_MMAP_REGIONS];
+
+/* Allocate the runtime translation tables */
+static uint64_t static_s1tt[XLAT_TABLE_ENTRIES * PLAT_CMN_CTX_MAX_XLAT_TABLES]
+ __aligned(XLAT_TABLES_ALIGNMENT)
+ __section("xlat_static_tables");
+
+/* Structures to hold the runtime translation context information */
+static struct xlat_ctx_tbls runtime_tbls;
+static struct xlat_ctx_cfg runtime_xlat_ctx_cfg;
+static struct xlat_ctx runtime_xlat_ctx;
/*
* Platform common cold boot setup for RMM.
@@ -77,61 +93,89 @@
*/
int plat_cmn_setup(unsigned long x0, unsigned long x1,
unsigned long x2, unsigned long x3,
- struct xlat_mmap_region *plat_regions)
+ struct xlat_mmap_region *plat_regions,
+ unsigned int nregions)
{
int ret;
+ unsigned int plat_offset, cmn_offset;
+
+ /* Common regions sorted by ascending VA */
+ struct xlat_mmap_region regions[COMMON_REGIONS] = {
+ RMM_CODE,
+ RMM_RO,
+ RMM_RW,
+ RMM_SHARED
+ };
+
+ if (nregions > PLAT_CMN_EXTRA_MMAP_REGIONS) {
+ return -ERANGE;
+ }
+
+ if (nregions > 0U && plat_regions == NULL) {
+ return -EINVAL;
+ }
/* Initialize the RMM <-> EL3 interface */
ret = rmm_el3_ifc_init(x0, x1, x2, x3, RMM_SHARED_BUFFER_START);
if (ret != 0) {
- ERROR("%s (%u): Failed to initialized RMM EL3 Interface\n",
- __func__, __LINE__);
- return ret;
- }
-
- /*
- * xlat library might modify the memory mappings
- * to optimize it, so don't make this constant.
- */
- struct xlat_mmap_region runtime_regions[] = {
- RMM_CODE,
- RMM_RO,
- RMM_RW,
- RMM_SHARED,
- {0}
- };
-
- assert(plat_regions != NULL);
-
- ret = xlat_mmap_add_ctx(&runtime_xlat_ctx, plat_regions, false);
- if (ret != 0) {
- ERROR("%s (%u): Failed to add platform regions to xlat mapping\n",
- __func__, __LINE__);
+ ERROR("%s (%u): Failed to initialize the RMM EL3 Interface\n",
+ __func__, __LINE__);
return ret;
}
/* Setup the parameters of the shared area */
- runtime_regions[3].base_pa = rmm_el3_ifc_get_shared_buf_pa();
- runtime_regions[3].size = rmm_el3_ifc_get_shared_buf_size();
+ regions[3].base_pa = rmm_el3_ifc_get_shared_buf_pa();
+ regions[3].size = rmm_el3_ifc_get_shared_buf_size();
- ret = xlat_mmap_add_ctx(&runtime_xlat_ctx, runtime_regions, true);
+ plat_offset = COMMON_REGIONS;
+ cmn_offset = 0U;
+ if (nregions > 0U) {
+ /*
+ * Combine the common memory regions with the platform ones
+ * in an array sorted by ascending VA.
+ */
+ if (plat_regions[0].base_va < RMM_CODE_START) {
+ plat_offset = 0U;
+ cmn_offset = nregions;
+ }
+ (void)memcpy((void *)&static_regions[plat_offset],
+ (void *)&plat_regions[0U],
+ sizeof(struct xlat_mmap_region) * nregions);
+ }
+
+ (void)memcpy((void *)&static_regions[cmn_offset], (void *)&regions[0U],
+ sizeof(struct xlat_mmap_region) * COMMON_REGIONS);
+
+ ret = xlat_ctx_cfg_init(&runtime_xlat_ctx_cfg, VA_LOW_REGION,
+ &static_regions[0], nregions + COMMON_REGIONS,
+ VIRT_ADDR_SPACE_SIZE);
+
if (ret != 0) {
- ERROR("%s (%u): Failed to add RMM common regions to xlat mapping\n",
- __func__, __LINE__);
+ ERROR("%s (%u): %s (%i)\n",
+ __func__, __LINE__,
+ "Failed to initialize the xlat ctx within the xlat library ",
+ ret);
return ret;
}
- ret = xlat_init_tables_ctx(&runtime_xlat_ctx);
+ ret = xlat_ctx_init(&runtime_xlat_ctx, &runtime_xlat_ctx_cfg,
+ &runtime_tbls,
+ &static_s1tt[0],
+ PLAT_CMN_CTX_MAX_XLAT_TABLES);
+
if (ret != 0) {
- ERROR("%s (%u): xlat initialization failed\n",
- __func__, __LINE__);
+ ERROR("%s (%u): %s (%i)\n",
+ __func__, __LINE__,
+ "Failed to create the xlat ctx within the xlat library ",
+ ret);
return ret;
}
/* Read supported GIC virtualization features and init GIC variables */
gic_get_virt_features();
- return 0;
+ /* Perform cold boot initialization of the slot buffer mechanism */
+ return slot_buf_coldboot_init();
}
/*
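
The two-step initialization above is what makes the library testable:
both xlat_ctx_cfg_init() and xlat_ctx_init() operate purely on
caller-provided storage. A host-side unit test could build a private
context as sketched below (the region contents, table count and
function name are placeholders; the two calls match the signatures
used above).

    #define TEST_XLAT_TABLES	(4U)

    static uint64_t test_s1tt[XLAT_TABLE_ENTRIES * TEST_XLAT_TABLES]
        __aligned(XLAT_TABLES_ALIGNMENT);
    static struct xlat_mmap_region test_regions[1]; /* filled by the test */
    static struct xlat_ctx_cfg test_cfg;
    static struct xlat_ctx_tbls test_tbls;
    static struct xlat_ctx test_ctx;

    static int test_ctx_create(void)
    {
        int ret;

        ret = xlat_ctx_cfg_init(&test_cfg, VA_LOW_REGION,
                                &test_regions[0], 1U,
                                VIRT_ADDR_SPACE_SIZE);
        if (ret != 0) {
            return ret;
        }

        return xlat_ctx_init(&test_ctx, &test_cfg, &test_tbls,
                             &test_s1tt[0], TEST_XLAT_TABLES);
    }
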
diff --git a/plat/fvp/src/fvp_setup.c b/plat/fvp/src/fvp_setup.c
index ad5b8da..1a8ff51 100644
--- a/plat/fvp/src/fvp_setup.c
+++ b/plat/fvp/src/fvp_setup.c
@@ -59,7 +59,10 @@
uart_init(RMM_UART_ADDR, FVP_UART_CLK_IN_HZ, FVP_UART_BAUDRATE);
/* Initialize the RMM <-> EL3 interface and xlat table */
- if (plat_cmn_setup(x0, x1, x2, x3, plat_regions) != 0) {
+ ret = plat_cmn_setup(x0, x1, x2, x3, plat_regions, 1U);
+ if (ret != 0) {
+ ERROR("%s (%u): Failed to setup the platform (%i)\n",
+ __func__, __LINE__, ret);
panic();
}
diff --git a/plat/host/common/src/host_platform_api_cmn.c b/plat/host/common/src/host_platform_api_cmn.c
index 2c4fe45..ebcf69a 100644
--- a/plat/host/common/src/host_platform_api_cmn.c
+++ b/plat/host/common/src/host_platform_api_cmn.c
@@ -3,6 +3,7 @@
* SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
*/
+#include <assert.h>
#include <debug.h>
#include <host_defs.h>
#include <host_utils.h>
@@ -12,11 +13,6 @@
COMPILER_ASSERT(RMM_MAX_GRANULES >= HOST_NR_GRANULES);
-/* No regions to add for host */
-struct xlat_mmap_region plat_regions[] = {
- {0}
-};
-
/*
* Local platform setup for RMM.
*
@@ -50,7 +46,7 @@
uint64_t x2, uint64_t x3)
{
/* Initialize xlat table */
- if (plat_cmn_setup(x0, x1, x2, x3, plat_regions) != 0) {
+ if (plat_cmn_setup(x0, x1, x2, x3, NULL, 0) != 0) {
panic();
}
diff --git a/plat/host/common/src/host_utils.c b/plat/host/common/src/host_utils.c
index 082f24f..3959008 100644
--- a/plat/host/common/src/host_utils.c
+++ b/plat/host/common/src/host_utils.c
@@ -3,6 +3,7 @@
* SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
*/
+#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <host_defs.h>
diff --git a/runtime/core/init.c b/runtime/core/init.c
index ad86fbc..c067d12 100644
--- a/runtime/core/init.c
+++ b/runtime/core/init.c
@@ -41,7 +41,7 @@
/*
* Finish initializing the slot buffer mechanism
*/
- slot_buf_init();
+ slot_buf_finish_warmboot_init();
}
void rmm_main(void)
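
The slot buffer initialization is now split across the two boot
phases. The intended ordering, sketched from the call sites in this
patch (not from the functions' internals), is:

    /* Cold boot, once, at the end of plat_cmn_setup(): */
    ret = slot_buf_coldboot_init();

    /* Warm boot, on every CPU, from the runtime init path above: */
    slot_buf_finish_warmboot_init();
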
diff --git a/runtime/linker.lds b/runtime/linker.lds
index 0928fae..a850530 100644
--- a/runtime/linker.lds
+++ b/runtime/linker.lds
@@ -79,17 +79,7 @@
bss_end = .;
} >RAM
- /*
- * The slot_buffer_xlat_tbl section is for full, aligned page tables.
- * The dynamic tables are used for transient memory areas that can
- * change at any time, so the tables must have RW access.
- *
- * The tables will be erased by the xlat library during start up.
- */
- slot_buffer_xlat_tbl ALIGN(GRANULE_SIZE) (NOLOAD) : {
- *(slot_buffer_xlat_tbls)
- } >RAM
-
+ . = ALIGN(GRANULE_SIZE);
rmm_rw_end = .;
rmm_end = rmm_rw_end;