feat: introduce ns ipa space page tables
Currently, VMs and SPs have a single set of S2 PTs and do not
distinguish tables meant to map the NS S1 IPA range from those meant
to map the secure S1 IPA range.
For SPs, introduce a new set of S2 PTs meant to hold mappings for the
NS IPA range.
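
As an illustration of the intended use (not part of this change): the
security state requested for a mapping selects the destination S2 PTs,
so a hypothetical SPMC-side caller mapping a non-secure buffer into an
SP only needs to tag the request with MM_MODE_NS:

    uint32_t mode = MM_MODE_R | MM_MODE_W | MM_MODE_NS;

    /* MM_MODE_NS routes the mapping to the SP's NS S2 PTs. */
    CHECK(vm_identity_map(vm_locked, begin, end, mode, ppool, &ipa));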
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
Change-Id: I94500fc53234db8ad2c56a95d5ea0bd3f3c0ed51
diff --git a/inc/hf/arch/vm.h b/inc/hf/arch/vm.h
index 937a17f..7bc13a2 100644
--- a/inc/hf/arch/vm.h
+++ b/inc/hf/arch/vm.h
@@ -14,3 +14,14 @@
* Set architecture-specific features for the specified VM.
*/
void arch_vm_features_set(struct vm *vm);
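+
+/**
+ * Arch-specific page table operations for a partition. On the secure
+ * side these operate on both the secure and non-secure IPA space S2
+ * page tables, selected by the MM_MODE_NS bit where relevant.
+ */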
+bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool);
+bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
+ paddr_t end, uint32_t mode, struct mpool *ppool);
+void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
+ paddr_t end, uint32_t mode, struct mpool *ppool,
+ ipaddr_t *ipa);
+bool arch_vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+ struct mpool *ppool);
+void arch_vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool);
+bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
+ ipaddr_t end, uint32_t *mode);
diff --git a/src/arch/aarch64/hypervisor/cpu.c b/src/arch/aarch64/hypervisor/cpu.c
index 75e704f..d494a25 100644
--- a/src/arch/aarch64/hypervisor/cpu.c
+++ b/src/arch/aarch64/hypervisor/cpu.c
@@ -126,11 +126,26 @@
} else {
r->hyp_state.ttbr0_el2 = read_msr(ttbr0_el2);
r->lazy.vtcr_el2 = arch_mm_get_vtcr_el2();
+#if SECURE_WORLD == 0
+ /*
+ * For a VM managed by the Hypervisor, a single set of
+ * NS S2 PTs exists.
+ * vttbr_el2 points to the single S2 root PT.
+ */
r->lazy.vttbr_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
-#if SECURE_WORLD == 1
+#else
+ /*
+ * For an SP managed by the SPMC, both the NS and secure
+ * S2 PTs exist.
+ * vttbr_el2 points to the NS S2 root PT.
+ * vsttbr_el2 points to the secure S2 root PT.
+ */
+ r->lazy.vttbr_el2 = pa_addr(vcpu->vm->arch.ptable_ns.root) |
+ ((uint64_t)vm_id << 48);
r->lazy.vstcr_el2 = arch_mm_get_vstcr_el2();
r->lazy.vsttbr_el2 = pa_addr(table);
#endif
+
r->lazy.vmpidr_el2 = vcpu_id;
/* Mask (disable) interrupts and run in EL1h mode. */
r->spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;
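
For reference (not part of the patch): both assignments above compose
the TTBR value as the root PT base address with the VMID packed into
bits [63:48]. A minimal sketch with a hypothetical helper name,
assuming 16-bit VMIDs (VTCR_EL2.VS = 1; with 8-bit VMIDs only bits
[55:48] are used):

    static inline uint64_t make_vttbr(paddr_t root, uint16_t vmid)
    {
            /* Root PT base address in the low bits, VMID on top. */
            return pa_addr(root) | ((uint64_t)vmid << 48);
    }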
diff --git a/src/arch/aarch64/hypervisor/vm.c b/src/arch/aarch64/hypervisor/vm.c
index 9433c23..eeb1228 100644
--- a/src/arch/aarch64/hypervisor/vm.c
+++ b/src/arch/aarch64/hypervisor/vm.c
@@ -8,6 +8,10 @@
#include "hf/arch/vm.h"
+#include "hf/arch/mmu.h"
+
+#include "hf/plat/iommu.h"
+
#include "hypervisor/feature_id.h"
void arch_vm_features_set(struct vm *vm)
@@ -54,3 +58,137 @@
#endif
}
}
+
+bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool)
+{
+ bool ret;
+
+ if (vm->el0_partition) {
+ return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
+ ppool);
+ }
+
+ ret = mm_vm_init(&vm->ptable, vm->id, ppool);
+
+#if SECURE_WORLD == 1
+ ret = ret && mm_vm_init(&vm->arch.ptable_ns, vm->id, ppool);
+#endif
+
+ return ret;
+}
+
+bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
+ paddr_t end, uint32_t mode, struct mpool *ppool)
+{
+ struct mm_ptable *table = &vm_locked.vm->ptable;
+
+ if (vm_locked.vm->el0_partition) {
+ return mm_identity_prepare(table, begin, end, mode, ppool);
+ }
+
+#if SECURE_WORLD == 1
+ if ((mode & MM_MODE_NS) != 0) {
+ table = &vm_locked.vm->arch.ptable_ns;
+ }
+#endif
+
+ return mm_vm_identity_prepare(table, begin, end, mode, ppool);
+}
+
+void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
+ paddr_t end, uint32_t mode, struct mpool *ppool,
+ ipaddr_t *ipa)
+{
+ struct mm_ptable *table = &vm_locked.vm->ptable;
+
+ if (vm_locked.vm->el0_partition) {
+ mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
+ ppool);
+ if (ipa != NULL) {
+ /*
+ * EL0 partitions are modeled as lightweight VMs to
+ * promote code reuse. The statement below returns the
+ * mapped PA as an IPA; however, for an EL0 partition,
+ * this is really a VA.
+ */
+ *ipa = ipa_from_pa(begin);
+ }
+ } else {
+#if SECURE_WORLD == 1
+ if ((mode & MM_MODE_NS) != 0) {
+ table = &vm_locked.vm->arch.ptable_ns;
+ }
+#endif
+
+ mm_vm_identity_commit(table, begin, end, mode, ppool, ipa);
+ }
+
+ /* TODO: pass security state to SMMU? */
+ plat_iommu_identity_map(vm_locked, begin, end, mode);
+}
+
+bool arch_vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+ struct mpool *ppool)
+{
+ bool ret;
+ uint32_t mode = MM_MODE_UNMAPPED_MASK;
+
+ ret = vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
+
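+ /*
+ * On the secure side, also clear the range in the non-secure IPA
+ * space so that no stale mapping survives in either set of S2 PTs.
+ */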
+#if SECURE_WORLD == 1
+ ret = ret && vm_identity_map(vm_locked, begin, end, mode | MM_MODE_NS,
+ ppool, NULL);
+#endif
+
+ return ret;
+}
+
+void arch_vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
+{
+ if (vm_locked.vm->el0_partition) {
+ mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
+ } else {
+ mm_vm_defrag(&vm_locked.vm->ptable, ppool);
+#if SECURE_WORLD == 1
+ /*
+ * TODO: check whether this can be better optimized (e.g. by
+ * passing the security state).
+ */
+ mm_vm_defrag(&vm_locked.vm->arch.ptable_ns, ppool);
+#endif
+ }
+}
+
+bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
+ ipaddr_t end, uint32_t *mode)
+{
+ bool ret;
+
+ if (vm_locked.vm->el0_partition) {
+ return mm_get_mode(&vm_locked.vm->ptable,
+ va_from_pa(pa_from_ipa(begin)),
+ va_from_pa(pa_from_ipa(end)), mode);
+ }
+
+ ret = mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
+
+#if SECURE_WORLD == 1
+ uint32_t mode2;
+ const uint32_t mask =
+ MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
+
+ /* If the region is fully unmapped in the secure IPA space. */
+ if (ret && ((*mode & mask) == mask)) {
+ /* Look up the non-secure IPA space. */
+ ret = mm_vm_get_mode(&vm_locked.vm->arch.ptable_ns, begin, end,
+ &mode2);
+
+ /* If the region is mapped in the non-secure IPA space. */
+ if (ret && ((mode2 & mask) != mask)) {
+ *mode = mode2;
+ }
+ }
+#endif
+
+ return ret;
+}
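
Note on arch_vm_mem_get_mode above: the secure IPA space is queried
first, and only when the range is fully unmapped there does the NS IPA
space supply the mode, so a caller sees one uniform result regardless
of which space backs the range. A hypothetical usage sketch:

    uint32_t mode;

    if (arch_vm_mem_get_mode(vm_locked, begin, end, &mode) &&
        (mode & MM_MODE_INVALID) == 0) {
            /* Range is fully mapped in whichever IPA space holds it. */
    }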
diff --git a/src/arch/aarch64/inc/hf/arch/vm/vm.h b/src/arch/aarch64/inc/hf/arch/vm/vm.h
index 8357ded..faa9ace 100644
--- a/src/arch/aarch64/inc/hf/arch/vm/vm.h
+++ b/src/arch/aarch64/inc/hf/arch/vm/vm.h
@@ -30,4 +30,15 @@
uintreg_t id_aa64dfr0_el1;
uintreg_t id_aa64isar1_el1;
} tid3_masks;
+
+#if SECURE_WORLD == 1
+ /**
+ * struct vm's ptable is the root page table pointed to by:
+ * - VTTBR_EL2 for the Hypervisor, defining the VM non-secure IPA
+ *   space.
+ * - VSTTBR_EL2 for the SPMC, defining the SP secure IPA space.
+ * ptable_ns is the root page table pointed to by VTTBR_EL2 for
+ * the SPMC, defining the SP non-secure IPA space.
+ */
+ struct mm_ptable ptable_ns;
+#endif
};
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index d3de6f3..936e5e3 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -577,3 +577,63 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
+
+bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool)
+{
+ (void)vm;
+ (void)ppool;
+
+ return true;
+}
+
+bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
+ paddr_t end, uint32_t mode, struct mpool *ppool)
+{
+ (void)vm_locked;
+ (void)begin;
+ (void)end;
+ (void)mode;
+ (void)ppool;
+
+ return true;
+}
+
+void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
+ paddr_t end, uint32_t mode, struct mpool *ppool,
+ ipaddr_t *ipa)
+{
+ (void)vm_locked;
+ (void)begin;
+ (void)end;
+ (void)mode;
+ (void)ppool;
+ (void)ipa;
+}
+
+bool arch_vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+ struct mpool *ppool)
+{
+ (void)vm_locked;
+ (void)begin;
+ (void)end;
+ (void)ppool;
+
+ return true;
+}
+
+void arch_vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
+{
+ (void)vm_locked;
+ (void)ppool;
+}
+
+bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
+ ipaddr_t end, uint32_t *mode) // NOLINT
+{
+ (void)vm_locked;
+ (void)begin;
+ (void)end;
+ (void)mode;
+
+ return true;
+}
diff --git a/src/vm.c b/src/vm.c
index 128d850..44bd112 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -8,6 +8,8 @@
#include "hf/vm.h"
+#include "hf/arch/vm.h"
+
#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
@@ -15,7 +17,6 @@
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
-#include "hf/plat/iommu.h"
#include "hf/std.h"
#include "vmapi/hf/call.h"
@@ -41,11 +42,7 @@
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
- if (vm->el0_partition) {
- return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
- ppool);
- }
- return mm_vm_init(&vm->ptable, vm->id, ppool);
+ return arch_vm_init_mm(vm, ppool);
}
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
@@ -317,12 +314,7 @@
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
uint32_t mode, struct mpool *ppool)
{
- if (vm_locked.vm->el0_partition) {
- return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
- mode, ppool);
- }
- return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
- ppool);
+ return arch_vm_identity_prepare(vm_locked, begin, end, mode, ppool);
}
/**
@@ -333,23 +325,7 @@
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
- if (vm_locked.vm->el0_partition) {
- mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
- ppool);
- if (ipa != NULL) {
- /*
- * EL0 partitions are modeled as lightweight VM's, to
- * promote code reuse. The below statement returns the
- * mapped PA as an IPA, however, for an EL0 partition,
- * this is really a VA.
- */
- *ipa = ipa_from_pa(begin);
- }
- } else {
- mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
- ppool, ipa);
- }
- plat_iommu_identity_map(vm_locked, begin, end, mode);
+ arch_vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
}
/**
@@ -361,9 +337,7 @@
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
struct mpool *ppool)
{
- uint32_t mode = MM_MODE_UNMAPPED_MASK;
-
- return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
+ return arch_vm_unmap(vm_locked, begin, end, ppool);
}
/**
@@ -371,11 +345,7 @@
*/
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
- if (vm_locked.vm->el0_partition) {
- mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
- } else {
- mm_vm_defrag(&vm_locked.vm->ptable, ppool);
- }
+ arch_vm_ptable_defrag(vm_locked, ppool);
}
/**
@@ -405,12 +375,7 @@
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
uint32_t *mode)
{
- if (vm_locked.vm->el0_partition) {
- return mm_get_mode(&vm_locked.vm->ptable,
- va_from_pa(pa_from_ipa(begin)),
- va_from_pa(pa_from_ipa(end)), mode);
- }
- return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
+ return arch_vm_mem_get_mode(vm_locked, begin, end, mode);
}
bool vm_mailbox_state_busy(struct vm_locked vm_locked)