fix: pass ns bit to tlb invalidate
From Arm ARM issue I.a, section D8.13.5 "TLB maintenance instructions",
rule I_CPNYZ:
"For TLB maintenance instructions that take a register argument that
holds an IPA and that do not apply to a range of addresses, the register
specified by the Xt argument has the following format:
Register bit[63] is one of the following:
- If the instruction is executed in Secure state, the NS bit specifying
  the Secure or Non-secure IPA space.
- If the instruction is executed in Non-secure state, RES0."
The mm library does not specify the security state for S2 TLB
invalidation by IPA, so S2 TLB invalidation operations always apply
to the secure IPA space. This change passes a parameter specifying
whether the S2 TLB operation applies to the secure or non-secure IPA
space, so that pages are invalidated from the appropriate IPA space.
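As a rough sketch, the Xt value for TLBI IPAS2E1IS ends up composed as
below (illustrative only: xt and ipa are not names from this change;
tlbi_reg is the existing helper used in the diff):

    /* IPA[47:12] occupies Xt bits[35:0]. */
    uint64_t xt = ipa >> 12;
    /* In Secure state, Xt bit[63] selects the NS IPA space. */
    if (non_secure) {
        xt |= UINT64_C(1) << 63;
    }
    tlbi_reg(ipas2e1is, xt);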
Change-Id: Iba2449112ffad0c1fc1fc460c2a67600075df743
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/src/arch/aarch64/hypervisor/vm.c b/src/arch/aarch64/hypervisor/vm.c
index eeb1228..e9355bd 100644
--- a/src/arch/aarch64/hypervisor/vm.c
+++ b/src/arch/aarch64/hypervisor/vm.c
@@ -148,13 +148,13 @@
if (vm_locked.vm->el0_partition) {
mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
} else {
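+ /* non_secure=false: TLBI by IPA without Xt bit[63] set. */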
- mm_vm_defrag(&vm_locked.vm->ptable, ppool);
+ mm_vm_defrag(&vm_locked.vm->ptable, ppool, false);
#if SECURE_WORLD == 1
/*
* TODO: check if this can be better optimized (pass the
- * security state ?).
+ * security state?).
*/
- mm_vm_defrag(&vm_locked.vm->arch.ptable_ns, ppool);
+ mm_vm_defrag(&vm_locked.vm->arch.ptable_ns, ppool, true);
#endif
}
}
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 3727c69..2f37fe1 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -343,7 +343,7 @@
* address range.
*/
void arch_mm_invalidate_stage2_range(uint16_t vmid, ipaddr_t va_begin,
- ipaddr_t va_end)
+ ipaddr_t va_end, bool non_secure)
{
uintpaddr_t begin = ipa_addr(va_begin);
uintpaddr_t end = ipa_addr(va_end);
@@ -388,6 +388,12 @@
*/
for (it = begin; it < end;
it += (UINT64_C(1) << (PAGE_BITS - 12))) {
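+ /*
+  * Per rule I_CPNYZ, Xt bit[63] is the NS bit when the instruction
+  * is executed in Secure state, and RES0 in Non-secure state.
+  */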
+ (void)non_secure;
+#if SECURE_WORLD == 1
+ /*
+  * Set bit[63] on the value passed to TLBI rather than on the loop
+  * counter itself, which would otherwise break the loop bound check
+  * and the increment.
+  */
+ tlbi_reg(ipas2e1is, non_secure ? (it | (UINT64_C(1) << 63)) : it);
+#else
tlbi_reg(ipas2e1is, it);
+#endif
}
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index ce6f755..b67266e 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -110,7 +110,7 @@
}
void arch_mm_invalidate_stage2_range(uint16_t vmid, ipaddr_t va_begin,
- ipaddr_t va_end)
+ ipaddr_t va_end, bool non_secure)
{
/* There's no modelling of the stage-2 TLB. */
}
diff --git a/src/mm.c b/src/mm.c
index b7e748d..b05a344 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -161,14 +161,14 @@
* Invalidates the TLB for the given address range.
*/
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end, int flags,
- uint16_t id)
+ uint16_t id, bool non_secure)
{
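+ /* The non_secure flag selects the IPA space; stage 1 ignores it. */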
if (flags & MM_FLAG_STAGE1) {
arch_mm_invalidate_stage1_range(id, va_init(begin),
va_init(end));
} else {
arch_mm_invalidate_stage2_range(id, ipa_init(begin),
- ipa_init(end));
+ ipa_init(end), non_secure);
}
}
@@ -268,7 +268,7 @@
*/
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
uint8_t level, int flags, struct mpool *ppool,
- uint16_t id)
+ uint16_t id, bool non_secure)
{
pte_t v = *pte;
@@ -280,7 +280,7 @@
arch_mm_pte_is_valid(v, level)) {
*pte = arch_mm_absent_pte(level);
mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags,
- id);
+ id, non_secure);
}
/* Assign the new pte. */
@@ -300,7 +300,7 @@
pte_t *pte, uint8_t level,
int flags,
struct mpool *ppool,
- uint16_t id)
+ uint16_t id, bool non_secure)
{
struct mm_page_table *ntable;
pte_t v = *pte;
@@ -344,7 +344,7 @@
/* Replace the pte entry, doing a break-before-make if needed. */
mm_replace_entry(begin, pte,
arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
- level, flags, ppool, id);
+ level, flags, ppool, id, non_secure);
return ntable;
}
@@ -399,7 +399,8 @@
: arch_mm_block_pte(level, pa,
attrs);
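+ /*
+  * Derive the IPA space for the invalidation from the NS attribute
+  * encoded in the PTE attrs (bit 57).
+  */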
mm_replace_entry(begin, pte, new_pte, level,
- flags, ppool, id);
+ flags, ppool, id,
+ (attrs & (1ULL << 57)) != 0);
}
} else {
/*
@@ -407,7 +408,8 @@
* replace it with an equivalent subtable and get that.
*/
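+ /* As above, the NS attribute (attrs bit 57) selects the IPA space. */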
struct mm_page_table *nt = mm_populate_table_pte(
- begin, pte, level, flags, ppool, id);
+ begin, pte, level, flags, ppool, id,
+ (attrs & (1ULL << 57)) != 0);
if (nt == NULL) {
return false;
}
@@ -639,7 +641,8 @@
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_ptable_defrag_entry(ptable_addr_t base_addr, pte_t *entry,
uint8_t level, int flags,
- struct mpool *ppool, uint16_t id)
+ struct mpool *ppool, uint16_t id,
+ bool non_secure)
{
struct mm_page_table *table;
uint64_t i;
@@ -658,7 +661,7 @@
static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");
mm_ptable_defrag_entry(base_addr, &(table->entries[0]), level - 1,
- flags, ppool, id);
+ flags, ppool, id, non_secure);
base_present = arch_mm_pte_is_present(table->entries[0], level - 1);
base_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
@@ -676,7 +679,7 @@
base_addr + (i * mm_entry_size(level - 1));
mm_ptable_defrag_entry(block_addr, &(table->entries[i]),
- level - 1, flags, ppool, id);
+ level - 1, flags, ppool, id, non_secure);
present = arch_mm_pte_is_present(table->entries[i], level - 1);
@@ -707,8 +710,8 @@
new_entry = mm_merge_table_pte(*entry, level);
if (*entry != new_entry) {
mm_replace_entry(base_addr, entry, new_entry, level, flags,
- ppool, id);
+ ppool, id, non_secure);
}
}
@@ -717,7 +720,7 @@
* blocks whenever possible.
*/
static void mm_ptable_defrag(struct mm_ptable *t, int flags,
- struct mpool *ppool)
+ struct mpool *ppool, bool non_secure)
{
struct mm_page_table *tables = mm_page_table_from_pa(t->root);
uint8_t level = mm_max_level(flags);
@@ -734,7 +737,7 @@
for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
mm_ptable_defrag_entry(block_addr,
&(tables[i].entries[j]), level,
- flags, ppool, t->id);
+ flags, ppool, t->id, non_secure);
block_addr = mm_start_of_next_block(
block_addr, mm_entry_size(level));
}
@@ -984,15 +987,15 @@
*/
void mm_stage1_defrag(struct mm_ptable *t, struct mpool *ppool)
{
- mm_ptable_defrag(t, MM_FLAG_STAGE1, ppool);
+ mm_ptable_defrag(t, MM_FLAG_STAGE1, ppool, false);
}
/**
* Defragments the VM page table.
*/
-void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool)
+void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool, bool non_secure)
{
- mm_ptable_defrag(t, 0, ppool);
+ mm_ptable_defrag(t, 0, ppool, non_secure);
}
/**
@@ -1095,7 +1098,7 @@
*/
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
{
- mm_ptable_defrag(stage1_locked.ptable, MM_FLAG_STAGE1, ppool);
+ mm_ptable_defrag(stage1_locked.ptable, MM_FLAG_STAGE1, ppool, false);
}
/**
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 5091a79..561416c 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -1106,7 +1106,7 @@
{
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
- mm_vm_defrag(&ptable, &ppool);
+ mm_vm_defrag(&ptable, &ppool, false);
EXPECT_THAT(
get_ptable(ptable),
AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -1132,7 +1132,7 @@
nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
- mm_vm_defrag(&ptable, &ppool);
+ mm_vm_defrag(&ptable, &ppool, false);
EXPECT_THAT(
get_ptable(ptable),
AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -1158,7 +1158,7 @@
nullptr));
ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
nullptr));
- mm_vm_defrag(&ptable, &ppool);
+ mm_vm_defrag(&ptable, &ppool, false);
EXPECT_THAT(
get_ptable(ptable),
AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,