Diffstat (limited to 'lib/aarch64')
-rw-r--r--  lib/aarch64/xlat_tables.c | 53
1 file changed, 50 insertions, 3 deletions
diff --git a/lib/aarch64/xlat_tables.c b/lib/aarch64/xlat_tables.c
index 1b99cc85fe..f1d658d528 100644
--- a/lib/aarch64/xlat_tables.c
+++ b/lib/aarch64/xlat_tables.c
@@ -31,6 +31,7 @@
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
+#include <cassert.h>
#include <platform_def.h>
#include <string.h>
#include <xlat_tables.h>
@@ -46,6 +47,7 @@
#define debug_print(...) ((void)0)
#endif
+CASSERT(ADDR_SPACE_SIZE > 0, assert_valid_addr_space_size);
#define UNSET_DESC ~0ul
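
The new CASSERT rejects a zero or unset ADDR_SPACE_SIZE at build time rather than at run time. As a rough, self-contained sketch (the real macro comes from cassert.h and the real ADDR_SPACE_SIZE from platform_def.h, so both definitions below are assumptions), a compile-time assertion of this style can be built from a typedef whose array size goes negative when the condition is false:

/* Hypothetical stand-ins for the real cassert.h / platform_def.h values. */
#define ADDR_SPACE_SIZE		(1ull << 32)	/* example: 4GB address space */
#define CASSERT(cond, msg)	typedef char msg[(cond) ? 1 : -1]

/* Compilation fails here if ADDR_SPACE_SIZE were 0. */
CASSERT(ADDR_SPACE_SIZE > 0, assert_valid_addr_space_size);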
@@ -58,6 +60,9 @@ static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
__aligned(XLAT_TABLE_SIZE) __attribute__((section("xlat_table")));
static unsigned next_xlat;
+static unsigned long max_pa;
+static unsigned long max_va;
+static unsigned long tcr_ps_bits;
/*
* Array of all memory regions stored in order of ascending base address.
@@ -85,6 +90,8 @@ void mmap_add_region(unsigned long base_pa, unsigned long base_va,
{
mmap_region_t *mm = mmap;
mmap_region_t *mm_last = mm + sizeof(mmap) / sizeof(mmap[0]) - 1;
+ unsigned long pa_end = base_pa + size - 1;
+ unsigned long va_end = base_va + size - 1;
assert(IS_PAGE_ALIGNED(base_pa));
assert(IS_PAGE_ALIGNED(base_va));
@@ -107,6 +114,11 @@ void mmap_add_region(unsigned long base_pa, unsigned long base_va,
mm->base_va = base_va;
mm->size = size;
mm->attr = attr;
+
+ if (pa_end > max_pa)
+ max_pa = pa_end;
+ if (va_end > max_va)
+ max_va = va_end;
}
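
For reference, a standalone sketch (region values are hypothetical, not taken from any platform port) of how the bookkeeping added above accumulates the highest physical and virtual addresses across successive registrations:

#include <stdio.h>

static unsigned long max_pa, max_va;

/* Mirrors the running-maximum logic added to mmap_add_region(). */
static void track_region(unsigned long base_pa, unsigned long base_va,
			 unsigned long size)
{
	unsigned long pa_end = base_pa + size - 1;
	unsigned long va_end = base_va + size - 1;

	if (pa_end > max_pa)
		max_pa = pa_end;
	if (va_end > max_va)
		max_va = va_end;
}

int main(void)
{
	track_region(0x0UL, 0x0UL, 0x04000000UL);		  /* 64MB at 0 */
	track_region(0x880000000UL, 0x80000000UL, 0x80000000UL); /* 2GB mapped above 32 bits */
	printf("max_pa=0x%lx max_va=0x%lx\n", max_pa, max_va);	  /* 0x8ffffffff / 0xffffffff */
	return 0;
}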
void mmap_add(const mmap_region_t *mm)
@@ -233,10 +245,40 @@ static mmap_region_t *init_xlation_table(mmap_region_t *mm,
return mm;
}
+static unsigned int calc_physical_addr_size_bits(unsigned long max_addr)
+{
+ /* Physical address can't exceed 48 bits */
+ assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+
+ /* 48 bits address */
+ if (max_addr & ADDR_MASK_44_TO_47)
+ return TCR_PS_BITS_256TB;
+
+ /* 44 bits address */
+ if (max_addr & ADDR_MASK_42_TO_43)
+ return TCR_PS_BITS_16TB;
+
+ /* 42 bits address */
+ if (max_addr & ADDR_MASK_40_TO_41)
+ return TCR_PS_BITS_4TB;
+
+ /* 40 bits address */
+ if (max_addr & ADDR_MASK_36_TO_39)
+ return TCR_PS_BITS_1TB;
+
+ /* 36 bits address */
+ if (max_addr & ADDR_MASK_32_TO_35)
+ return TCR_PS_BITS_64GB;
+
+ return TCR_PS_BITS_4GB;
+}
+
void init_xlat_tables(void)
{
print_mmap();
init_xlation_table(mmap, 0, l1_xlation_table, 1);
+ tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
+ assert(max_va < ADDR_SPACE_SIZE);
}
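
To see how the ladder in calc_physical_addr_size_bits() resolves, here is a self-contained sketch with locally defined constants (assumed to mirror what arch.h provides; the PS/IPS encodings run from 0x0 for a 32-bit/4GB physical range up to 0x5 for 48-bit/256TB). A highest physical address of 0x8FFFFFFFF sets bit 35, so only the 32-35 mask matches and the 64GB (36-bit) encoding is returned:

#include <stdio.h>

/* Assumed mask values, standing in for arch.h. */
#define ADDR_MASK_44_TO_47	0x0000F00000000000ULL
#define ADDR_MASK_42_TO_43	0x00000C0000000000ULL
#define ADDR_MASK_40_TO_41	0x0000030000000000ULL
#define ADDR_MASK_36_TO_39	0x000000F000000000ULL
#define ADDR_MASK_32_TO_35	0x0000000F00000000ULL

/* Assumed PS/IPS encodings: 4GB=0, 64GB=1, 1TB=2, 4TB=3, 16TB=4, 256TB=5. */
enum { PS_4GB, PS_64GB, PS_1TB, PS_4TB, PS_16TB, PS_256TB };

static unsigned int calc_ps_bits(unsigned long long max_addr)
{
	if (max_addr & ADDR_MASK_44_TO_47)
		return PS_256TB;
	if (max_addr & ADDR_MASK_42_TO_43)
		return PS_16TB;
	if (max_addr & ADDR_MASK_40_TO_41)
		return PS_4TB;
	if (max_addr & ADDR_MASK_36_TO_39)
		return PS_1TB;
	if (max_addr & ADDR_MASK_32_TO_35)
		return PS_64GB;
	return PS_4GB;
}

int main(void)
{
	printf("%u\n", calc_ps_bits(0x8FFFFFFFFULL));	/* bit 35 set -> 1 (64GB) */
	printf("%u\n", calc_ps_bits(0xFFFFFFFFULL));	/* fits in 32 bits -> 0 (4GB) */
	return 0;
}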
/*******************************************************************************
@@ -270,7 +312,8 @@ void init_xlat_tables(void)
/* Set TCR bits as well. */ \
/* Inner & outer WBWA & shareable + T0SZ = 32 */ \
tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | \
- TCR_RGN_INNER_WBA | TCR_T0SZ_4GB; \
+ TCR_RGN_INNER_WBA | \
+ (64 - __builtin_ctzl(ADDR_SPACE_SIZE)); \
tcr |= _tcr_extra; \
write_tcr_el##_el(tcr); \
\
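
The old code hard-wired T0SZ to 32 (a 4GB virtual range); the replacement derives it from the platform's ADDR_SPACE_SIZE. Because ADDR_SPACE_SIZE must be a power of two, __builtin_ctzl() gives log2 of the size at compile time, and T0SZ = 64 - log2(size). A quick sketch with assumed sizes (the 4GB case reproduces the old fixed value of 32):

#include <stdio.h>

int main(void)
{
	/* Hypothetical platform values; each must be a power of two. */
	unsigned long sizes[] = { 1UL << 32, 1UL << 34, 1UL << 36 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int t0sz = 64 - __builtin_ctzl(sizes[i]);
		printf("ADDR_SPACE_SIZE=2^%d -> T0SZ=%u\n",
		       __builtin_ctzl(sizes[i]), t0sz);
	}
	return 0;
}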
@@ -295,5 +338,9 @@ void init_xlat_tables(void)
}
/* Define EL1 and EL3 variants of the function enabling the MMU */
-DEFINE_ENABLE_MMU_EL(1, 0, tlbivmalle1)
-DEFINE_ENABLE_MMU_EL(3, TCR_EL3_RES1, tlbialle3)
+DEFINE_ENABLE_MMU_EL(1,
+ (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
+ tlbivmalle1)
+DEFINE_ENABLE_MMU_EL(3,
+ TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
+ tlbialle3)
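
The per-EL difference matters here: TCR_EL1 carries the physical address size in its IPS field while TCR_EL3 uses the PS field, so the same tcr_ps_bits value is shifted to different bit positions before being OR-ed into the register. A small sketch, with shift values assumed to match the architectural field positions (IPS at bit 32, PS at bit 16):

#include <stdio.h>

/* Assumed field positions, standing in for arch.h. */
#define TCR_EL1_IPS_SHIFT	32
#define TCR_EL3_PS_SHIFT	16

int main(void)
{
	unsigned long tcr_ps_bits = 0x1UL;	/* 36-bit PA, from the earlier example */

	printf("EL1 extra TCR bits: 0x%lx\n", tcr_ps_bits << TCR_EL1_IPS_SHIFT);
	printf("EL3 extra TCR bits: 0x%lx\n", tcr_ps_bits << TCR_EL3_PS_SHIFT);
	return 0;
}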